1 /*
2  * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "gc/cms/cmsHeap.hpp"
27 #include "gc/cms/cmsLockVerifier.hpp"
28 #include "gc/cms/compactibleFreeListSpace.hpp"
29 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
30 #include "gc/cms/concurrentMarkSweepThread.hpp"
31 #include "gc/shared/blockOffsetTable.inline.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "gc/shared/genOopClosures.inline.hpp"
34 #include "gc/shared/space.inline.hpp"
35 #include "gc/shared/spaceDecorator.hpp"
36 #include "logging/log.hpp"
37 #include "logging/logStream.hpp"
38 #include "memory/allocation.inline.hpp"
39 #include "memory/binaryTreeDictionary.inline.hpp"
40 #include "memory/iterator.inline.hpp"
41 #include "memory/resourceArea.hpp"
42 #include "memory/universe.hpp"
43 #include "oops/access.inline.hpp"
44 #include "oops/compressedOops.inline.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/globals.hpp"
47 #include "runtime/handles.inline.hpp"
48 #include "runtime/init.hpp"
49 #include "runtime/java.hpp"
50 #include "runtime/orderAccess.hpp"
51 #include "runtime/vmThread.hpp"
52 #include "utilities/align.hpp"
53 #include "utilities/copy.hpp"
54 
55 // Specialize for AdaptiveFreeList which tries to avoid
56 // splitting a chunk of a size that is under populated in favor of
57 // an over populated size.  The general get_better_list() just returns
58 // the current list.
59 template <>
60 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
61 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
62   BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
63   // A candidate chunk has been found.  If it is already under
64   // populated, get a chunk associated with the hint for this
65   // chunk.
66 
67   TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
68   if (curTL->surplus() <= 0) {
69     /* Use the hint to find a size with a surplus, and reset the hint. */
70     TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
71     while (hintTL->hint() != 0) {
72       assert(hintTL->hint() > hintTL->size(),
73         "hint points in the wrong direction");
74       hintTL = dictionary->find_list(hintTL->hint());
75       assert(curTL != hintTL, "Infinite loop");
76       if (hintTL == NULL ||
77           hintTL == curTL /* Should not happen but protect against it */ ) {
78         // No useful hint.  Set the hint to NULL and go on.
79         curTL->set_hint(0);
80         break;
81       }
82       assert(hintTL->size() > curTL->size(), "hint is inconsistent");
83       if (hintTL->surplus() > 0) {
84         // The hint led to a list that has a surplus.  Use it.
85         // Set the hint for the candidate to an overpopulated
86         // size.
87         curTL->set_hint(hintTL->size());
88         // Change the candidate.
89         curTL = hintTL;
90         break;
91       }
92     }
93   }
94   return curTL;
95 }
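// Illustrative example (sizes made up): if the 24-word list is under
// populated but its hint names size 40, and the 40-word list has a surplus,
// the 40-word list is returned and the 24-word list's hint is refreshed so
// that later requests try the over populated size first.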
96 
97 void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
98   TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
99   if (nd) {
100     if (split) {
101       if (birth) {
102         nd->increment_split_births();
103         nd->increment_surplus();
104       }  else {
105         nd->increment_split_deaths();
106         nd->decrement_surplus();
107       }
108     } else {
109       if (birth) {
110         nd->increment_coal_births();
111         nd->increment_surplus();
112       } else {
113         nd->increment_coal_deaths();
114         nd->decrement_surplus();
115       }
116     }
117   }
118   // A list for this size may not be found (nd == 0) if either
119   //   this is a death where the appropriate list is now
120   //     empty and has been removed from the tree, or
121   //   this is a birth associated with a LinAB.  The chunk
122   //     for the LinAB is not in the dictionary.
123 }
124 
125 bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
126   if (FLSAlwaysCoalesceLarge) return true;
127 
128   TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
129   // None of requested size implies overpopulated.
130   return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
131          list_of_size->count() > list_of_size->coal_desired();
132 }
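// For example, with coal_desired() == 5, a list holding 7 chunks of the
// requested size is considered over populated, while a list holding 3 is not.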
133 
134 // For each list in the tree, calculate the desired, desired
135 // coalesce, count before sweep, and surplus before sweep.
136 class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
137   double _percentage;
138   float _inter_sweep_current;
139   float _inter_sweep_estimate;
140   float _intra_sweep_estimate;
141 
142  public:
143   BeginSweepClosure(double p, float inter_sweep_current,
144                               float inter_sweep_estimate,
145                               float intra_sweep_estimate) :
146    _percentage(p),
147    _inter_sweep_current(inter_sweep_current),
148    _inter_sweep_estimate(inter_sweep_estimate),
149    _intra_sweep_estimate(intra_sweep_estimate) { }
150 
151   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
152     double coalSurplusPercent = _percentage;
153     fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
154     fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
155     fl->set_before_sweep(fl->count());
156     fl->set_bfr_surp(fl->surplus());
157   }
158 };
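// Numbers purely illustrative: if compute_desired() settles on desired() == 100
// and the coalescing surplus percentage is 0.95, coal_desired() becomes 95.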
159 
160 void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
161   float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
162   BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
163                         inter_sweep_estimate,
164                         intra_sweep_estimate);
165   bsc.do_tree(root());
166 }
167 
168 // Calculate surpluses for the lists in the tree.
169 class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
170   double percentage;
171  public:
172   setTreeSurplusClosure(double v) { percentage = v; }
173 
174   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
175     double splitSurplusPercent = percentage;
176     fl->set_surplus(fl->count() -
177                    (ssize_t)((double)fl->desired() * splitSurplusPercent));
178   }
179 };
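// Numbers purely illustrative: with count() == 120, desired() == 100 and a
// split surplus percentage of 1.10, the surplus is set to 120 - 110 = 10.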
180 
181 void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
182   setTreeSurplusClosure sts(splitSurplusPercent);
183   sts.do_tree(root());
184 }
185 
186 // Set hints for the lists in the tree.
187 class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
188   size_t hint;
189  public:
190   setTreeHintsClosure(size_t v) { hint = v; }
191 
192   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
193     fl->set_hint(hint);
194     assert(fl->hint() == 0 || fl->hint() > fl->size(),
195       "Current hint is inconsistent");
196     if (fl->surplus() > 0) {
197       hint = fl->size();
198     }
199   }
200 };
201 
202 void AFLBinaryTreeDictionary::set_tree_hints(void) {
203   setTreeHintsClosure sth(0);
204   sth.do_tree(root());
205 }
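// Note: because the closure descends from the largest size to the smallest,
// each list's hint ends up naming the smallest larger size that had a
// surplus at this point in the sweep, or 0 if no larger size did.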
206 
207 // Save count before previous sweep and splits and coalesces.
208 class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
209   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
210     fl->set_prev_sweep(fl->count());
211     fl->set_coal_births(0);
212     fl->set_coal_deaths(0);
213     fl->set_split_births(0);
214     fl->set_split_deaths(0);
215   }
216 };
217 
218 void AFLBinaryTreeDictionary::clear_tree_census(void) {
219   clearTreeCensusClosure ctc;
220   ctc.do_tree(root());
221 }
222 
223 // Do reporting and post sweep clean up.
224 void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
225   // Does walking the tree 3 times hurt?
226   set_tree_surplus(splitSurplusPercent);
227   set_tree_hints();
228   LogTarget(Trace, gc, freelist, stats) log;
229   if (log.is_enabled()) {
230     LogStream out(log);
231     report_statistics(&out);
232   }
233   clear_tree_census();
234 }
235 
236 // Print census information - counts, births, deaths, etc.
237 // for each list in the tree.  Also print some summary
238 // information.
239 class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
240   int _print_line;
241   size_t _total_free;
242   AdaptiveFreeList<FreeChunk> _total;
243 
244  public:
245   PrintTreeCensusClosure() {
246     _print_line = 0;
247     _total_free = 0;
248   }
249   AdaptiveFreeList<FreeChunk>* total() { return &_total; }
250   size_t total_free() { return _total_free; }
251 
252   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
253     LogStreamHandle(Debug, gc, freelist, census) out;
254 
255     if (++_print_line >= 40) {
256       AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
257       _print_line = 0;
258     }
259     fl->print_on(&out);
260     _total_free +=           fl->count()             * fl->size()        ;
261     total()->set_count(      total()->count()        + fl->count()      );
262     total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
263     total()->set_surplus(    total()->surplus()      + fl->surplus()    );
264     total()->set_desired(    total()->desired()      + fl->desired()    );
265     total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
266     total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
267     total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
268     total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
269     total()->set_split_births(total()->split_births() + fl->split_births());
270     total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
271   }
272 };
273 
274 void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
275 
276   st->print_cr("BinaryTree");
277   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
278   PrintTreeCensusClosure ptc;
279   ptc.do_tree(root());
280 
281   AdaptiveFreeList<FreeChunk>* total = ptc.total();
282   AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
283   total->print_on(st, "TOTAL\t");
284   st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
285                ptc.total_free(),
286                (double)(total->split_births() + total->coal_births()
287                       - total->split_deaths() - total->coal_deaths())
288                /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
289               (double)(total->desired() - total->count())
290               /(total->desired() != 0 ? (double)total->desired() : 1.0));
291 }
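// The summary line above reports "growth" as (split + coalesce births minus
// deaths) relative to the previous sweep's count, and "deficit" as
// (desired - count) relative to desired; both use a divisor of 1 when the
// corresponding denominator is zero.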
292 
293 /////////////////////////////////////////////////////////////////////////
294 //// CompactibleFreeListSpace
295 /////////////////////////////////////////////////////////////////////////
296 
297 // highest ranked free list lock rank
298 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
299 
300 // Defaults are 0 so things will break badly if incorrectly initialized.
301 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
302 size_t CompactibleFreeListSpace::IndexSetStride = 0;
303 size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
304 
305 size_t MinChunkSize = 0;
306 
307 void CompactibleFreeListSpace::set_cms_values() {
308   // Set CMS global values
309   assert(MinChunkSize == 0, "already set");
310 
311   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
312   // for chunks to contain a FreeChunk.
313   _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
314   MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
315 
316   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
317   IndexSetStart  = MinChunkSize;
318   IndexSetStride = MinObjAlignment;
319 }
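// Illustrative arithmetic only (sizeof(FreeChunk) is platform dependent):
// if sizeof(FreeChunk) were 24 bytes on a 64-bit VM with 8-byte object
// alignment, _min_chunk_size_in_bytes would remain 24 and MinChunkSize
// would be 24 / 8 = 3 words.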
320 
321 // Constructor
322 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
323   _bt(bs, mr),
324   // free list locks are in the range of values taken by _lockRank
325   // This range currently is [_leaf+2, _leaf+3]
326   // Note: this requires that CFLspace c'tors
327   // are called serially in the order in which the locks
328   // are acquired in the program text. This is true today.
329   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
330                 Monitor::_safepoint_check_sometimes),
331   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
332                           "CompactibleFreeListSpace._dict_par_lock", true,
333                           Monitor::_safepoint_check_never),
334   _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
335                     CMSRescanMultiple),
336   _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
337                     CMSConcMarkMultiple),
338   _collector(NULL),
339   _preconsumptionDirtyCardClosure(NULL)
340 {
341   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
342          "FreeChunk is larger than expected");
343   _bt.set_space(this);
344   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
345 
346   _dictionary = new AFLBinaryTreeDictionary(mr);
347 
348   assert(_dictionary != NULL, "CMS dictionary initialization");
349   // The indexed free lists are initially all empty and are lazily
350   // filled in on demand. Initialize the array elements to NULL.
351   initializeIndexedFreeListArray();
352 
353   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
354                              SmallForLinearAlloc);
355 
356   // CMSIndexedFreeListReplenish should be at least 1
357   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
358   _promoInfo.setSpace(this);
359   if (UseCMSBestFit) {
360     _fitStrategy = FreeBlockBestFitFirst;
361   } else {
362     _fitStrategy = FreeBlockStrategyNone;
363   }
364   check_free_list_consistency();
365 
366   // Initialize locks for parallel case.
367   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
368     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
369                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
370     DEBUG_ONLY(
371       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
372     )
373   }
374   _dictionary->set_par_lock(&_parDictionaryAllocLock);
375 
376   _used_stable = 0;
377 }
378 
379 // Like CompactibleSpace forward() but always calls cross_threshold() to
380 // update the block offset table.  Removed initialize_threshold call because
381 // CFLS does not use a block offset array for contiguous spaces.
382 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
383                                     CompactPoint* cp, HeapWord* compact_top) {
384   // q is alive
385   // First check if we should switch compaction space
386   assert(this == cp->space, "'this' should be current compaction space.");
387   size_t compaction_max_size = pointer_delta(end(), compact_top);
388   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
389     "virtual adjustObjectSize_v() method is not correct");
390   size_t adjusted_size = adjustObjectSize(size);
391   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
392          "no small fragments allowed");
393   assert(minimum_free_block_size() == MinChunkSize,
394          "for de-virtualized reference below");
395   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
396   if (adjusted_size + MinChunkSize > compaction_max_size &&
397       adjusted_size != compaction_max_size) {
398     do {
399       // switch to next compaction space
400       cp->space->set_compaction_top(compact_top);
401       cp->space = cp->space->next_compaction_space();
402       if (cp->space == NULL) {
403         cp->gen = CMSHeap::heap()->young_gen();
404         assert(cp->gen != NULL, "compaction must succeed");
405         cp->space = cp->gen->first_compaction_space();
406         assert(cp->space != NULL, "generation must have a first compaction space");
407       }
408       compact_top = cp->space->bottom();
409       cp->space->set_compaction_top(compact_top);
410       // The correct adjusted_size may not be the same as that for this method
411   // (i.e., cp->space may no longer be "this"), so adjust the size again.
412       // Use the virtual method which is not used above to save the virtual
413       // dispatch.
414       adjusted_size = cp->space->adjust_object_size_v(size);
415       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
416       assert(cp->space->minimum_free_block_size() == 0, "just checking");
417     } while (adjusted_size > compaction_max_size);
418   }
419 
420   // store the forwarding pointer into the mark word
421   if ((HeapWord*)q != compact_top) {
422     q->forward_to(oop(compact_top));
423     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
424   } else {
425     // if the object isn't moving we can just set the mark to the default
426     // mark and handle it specially later on.
427     q->init_mark_raw();
428     assert(q->forwardee() == NULL, "should be forwarded to NULL");
429   }
430 
431   compact_top += adjusted_size;
432 
433   // we need to update the offset table so that the beginnings of objects can be
434   // found during scavenge.  Note that we are updating the offset table based on
435   // where the object will be once the compaction phase finishes.
436 
437   // Always call cross_threshold().  A contiguous space can only call it when
438   // the compaction_top exceeds the current threshold but not for a
439   // non-contiguous space.
440   cp->threshold =
441     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
442   return compact_top;
443 }
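// Note: the residual-fragment check above ensures that whatever remains
// between the final compaction_top and end() in a compaction space is either
// empty or at least MinChunkSize words, so reset_after_compaction() can later
// describe it as a legal FreeChunk.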
444 
445 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
446 // and use of single_block instead of alloc_block.  The name here is not really
447 // appropriate - maybe a more general name could be invented for both the
448 // contiguous and noncontiguous spaces.
449 
450 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
451   _bt.single_block(start, the_end);
452   return end();
453 }
454 
455 // Initialize them to NULL.
456 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
457   for (size_t i = 0; i < IndexSetSize; i++) {
458     // Note that on platforms where objects are double word aligned,
459     // the odd array elements are not used.  It is convenient, however,
460     // to map directly from the object size to the array element.
461     _indexedFreeList[i].reset(IndexSetSize);
462     _indexedFreeList[i].set_size(i);
463     assert(_indexedFreeList[i].count() == 0, "reset check failed");
464     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
465     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
466     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
467   }
468 }
469 
470 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
471   return adjustObjectSize(oop(addr)->size());
472 }
473 
474 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
475   for (size_t i = 1; i < IndexSetSize; i++) {
476     assert(_indexedFreeList[i].size() == (size_t) i,
477       "Indexed free list sizes are incorrect");
478     _indexedFreeList[i].reset(IndexSetSize);
479     assert(_indexedFreeList[i].count() == 0, "reset check failed");
480     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
481     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
482     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
483   }
484 }
485 
486 void CompactibleFreeListSpace::reset(MemRegion mr) {
487   resetIndexedFreeListArray();
488   dictionary()->reset();
489   if (BlockOffsetArrayUseUnallocatedBlock) {
490     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
491     // Everything's allocated until proven otherwise.
492     _bt.set_unallocated_block(end());
493   }
494   if (!mr.is_empty()) {
495     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
496     _bt.single_block(mr.start(), mr.word_size());
497     FreeChunk* fc = (FreeChunk*) mr.start();
498     fc->set_size(mr.word_size());
499     if (mr.word_size() >= IndexSetSize ) {
500       returnChunkToDictionary(fc);
501     } else {
502       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
503       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
504     }
505     coalBirth(mr.word_size());
506   }
507   _promoInfo.reset();
508   _smallLinearAllocBlock._ptr = NULL;
509   _smallLinearAllocBlock._word_size = 0;
510 }
511 
512 void CompactibleFreeListSpace::reset_after_compaction() {
513   // Reset the space to the new reality - one free chunk.
514   MemRegion mr(compaction_top(), end());
515   reset(mr);
516   // Now refill the linear allocation block(s) if possible.
517   refillLinearAllocBlocksIfNeeded();
518 }
519 
520 // Walks the entire dictionary, returning a coterminal
521 // chunk, if it exists. Use with caution since it involves
522 // a potentially complete walk of a potentially large tree.
523 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
524 
525   assert_lock_strong(&_freelistLock);
526 
527   return dictionary()->find_chunk_ends_at(end());
528 }
529 
530 
531 #ifndef PRODUCT
532 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
533   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
534     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
535   }
536 }
537 
538 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
539   size_t sum = 0;
540   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
541     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
542   }
543   return sum;
544 }
545 
546 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
547   size_t count = 0;
548   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
549     debug_only(
550       ssize_t total_list_count = 0;
551       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
552          fc = fc->next()) {
553         total_list_count++;
554       }
555       assert(total_list_count ==  _indexedFreeList[i].count(),
556         "Count in list is incorrect");
557     )
558     count += _indexedFreeList[i].count();
559   }
560   return count;
561 }
562 
563 size_t CompactibleFreeListSpace::totalCount() {
564   size_t num = totalCountInIndexedFreeLists();
565   num +=  dictionary()->total_count();
566   if (_smallLinearAllocBlock._word_size != 0) {
567     num++;
568   }
569   return num;
570 }
571 #endif
572 
573 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
574   FreeChunk* fc = (FreeChunk*) p;
575   return fc->is_free();
576 }
577 
578 size_t CompactibleFreeListSpace::used() const {
579   return capacity() - free();
580 }
581 
582 size_t CompactibleFreeListSpace::used_stable() const {
583   return _used_stable;
584 }
585 
586 void CompactibleFreeListSpace::recalculate_used_stable() {
587   _used_stable = used();
588 }
589 
590 size_t CompactibleFreeListSpace::free() const {
591   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
592   // if you do this while the structures are in flux you
593   // may get an approximate answer only; for instance
594   // because there is concurrent allocation either
595   // directly by mutators or for promotion during a GC.
596   // It's "MT-safe", however, in the sense that you are guaranteed
597   // not to crash and burn, for instance, because of walking
598   // pointers that could disappear as you were walking them.
599   // The approximation is because the various components
600   // that are read below are not read atomically (and
601   // further the computation of totalSizeInIndexedFreeLists()
602   // is itself a non-atomic computation). The normal use of
603   // this is during a resize operation at the end of GC
604   // and at that time you are guaranteed to get the
605   // correct actual value. However, for instance, this is
606   // also read completely asynchronously by the "perf-sampler"
607   // that supports jvmstat, and you are apt to see the values
608   // flicker in such cases.
609   assert(_dictionary != NULL, "No _dictionary?");
610   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
611           totalSizeInIndexedFreeLists() +
612           _smallLinearAllocBlock._word_size) * HeapWordSize;
613 }
614 
615 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
616   assert(_dictionary != NULL, "No _dictionary?");
617   assert_locked();
618   size_t res = _dictionary->max_chunk_size();
619   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
620                        (size_t) SmallForLinearAlloc - 1));
621   // XXX the following could potentially be pretty slow;
622   // should one, pessimistically for the rare cases when res
623   // calculated above is less than IndexSetSize,
624   // just return res calculated above? My reasoning was that
625   // those cases will be so rare that the extra time spent doesn't
626   // really matter....
627   // Note: do not change the loop test i >= res + IndexSetStride
628   // to i > res below, because i is unsigned and res may be zero.
629   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
630        i -= IndexSetStride) {
631     if (_indexedFreeList[i].head() != NULL) {
632       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
633       return i;
634     }
635   }
636   return res;
637 }
638 
639 void LinearAllocBlock::print_on(outputStream* st) const {
640   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
641             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
642             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
643 }
644 
645 void CompactibleFreeListSpace::print_on(outputStream* st) const {
646   st->print_cr("COMPACTIBLE FREELIST SPACE");
647   st->print_cr(" Space:");
648   Space::print_on(st);
649 
650   st->print_cr("promoInfo:");
651   _promoInfo.print_on(st);
652 
653   st->print_cr("_smallLinearAllocBlock");
654   _smallLinearAllocBlock.print_on(st);
655 
656   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
657 
658   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
659 }
660 
661 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
662 const {
663   reportIndexedFreeListStatistics(st);
664   st->print_cr("Layout of Indexed Freelists");
665   st->print_cr("---------------------------");
666   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
667   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
668     _indexedFreeList[i].print_on(st);
669     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
670       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
671                    p2i(fc), p2i((HeapWord*)fc + i),
672                    fc->cantCoalesce() ? "\t CC" : "");
673     }
674   }
675 }
676 
677 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
678 const {
679   _promoInfo.print_on(st);
680 }
681 
682 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
683 const {
684   _dictionary->report_statistics(st);
685   st->print_cr("Layout of Freelists in Tree");
686   st->print_cr("---------------------------");
687   _dictionary->print_free_lists(st);
688 }
689 
690 class BlkPrintingClosure: public BlkClosure {
691   const CMSCollector*             _collector;
692   const CompactibleFreeListSpace* _sp;
693   const CMSBitMap*                _live_bit_map;
694   const bool                      _post_remark;
695   outputStream*                   _st;
696 public:
697   BlkPrintingClosure(const CMSCollector* collector,
698                      const CompactibleFreeListSpace* sp,
699                      const CMSBitMap* live_bit_map,
700                      outputStream* st):
701     _collector(collector),
702     _sp(sp),
703     _live_bit_map(live_bit_map),
704     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
705     _st(st) { }
706   size_t do_blk(HeapWord* addr);
707 };
708 
709 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
710   size_t sz = _sp->block_size_no_stall(addr, _collector);
711   assert(sz != 0, "Should always be able to compute a size");
712   if (_sp->block_is_obj(addr)) {
713     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
714     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
715       p2i(addr),
716       dead ? "dead" : "live",
717       sz,
718       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
719     if (CMSPrintObjectsInDump && !dead) {
720       oop(addr)->print_on(_st);
721       _st->print_cr("--------------------------------------");
722     }
723   } else { // free block
724     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
725       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
726     if (CMSPrintChunksInDump) {
727       ((FreeChunk*)addr)->print_on(_st);
728       _st->print_cr("--------------------------------------");
729     }
730   }
731   return sz;
732 }
733 
734 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
735   st->print_cr("=========================");
736   st->print_cr("Block layout in CMS Heap:");
737   st->print_cr("=========================");
738   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
739   blk_iterate(&bpcl);
740 
741   st->print_cr("=======================================");
742   st->print_cr("Order & Layout of Promotion Info Blocks");
743   st->print_cr("=======================================");
744   print_promo_info_blocks(st);
745 
746   st->print_cr("===========================");
747   st->print_cr("Order of Indexed Free Lists");
748   st->print_cr("===========================");
749   print_indexed_free_lists(st);
750 
751   st->print_cr("=================================");
752   st->print_cr("Order of Free Lists in Dictionary");
753   st->print_cr("=================================");
754   print_dictionary_free_lists(st);
755 }
756 
757 
758 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
759   assert_lock_strong(&_freelistLock);
760   Log(gc, freelist, stats) log;
761   if (!log.is_debug()) {
762     return;
763   }
764   log.debug("%s", title);
765 
766   LogStream out(log.debug());
767   _dictionary->report_statistics(&out);
768 
769   if (log.is_trace()) {
770     LogStream trace_out(log.trace());
771     reportIndexedFreeListStatistics(&trace_out);
772     size_t total_size = totalSizeInIndexedFreeLists() +
773                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
774     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
775   }
776 }
777 
778 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
779   assert_lock_strong(&_freelistLock);
780   st->print_cr("Statistics for IndexedFreeLists:");
781   st->print_cr("--------------------------------");
782   size_t total_size = totalSizeInIndexedFreeLists();
783   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
784   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
785   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
786   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
787   if (free_blocks != 0) {
788     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
789   }
790 }
791 
792 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
793   size_t res = 0;
794   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
795     debug_only(
796       ssize_t recount = 0;
797       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
798          fc = fc->next()) {
799         recount += 1;
800       }
801       assert(recount == _indexedFreeList[i].count(),
802         "Incorrect count in list");
803     )
804     res += _indexedFreeList[i].count();
805   }
806   return res;
807 }
808 
809 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
810   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
811     if (_indexedFreeList[i].head() != NULL) {
812       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
813       return (size_t)i;
814     }
815   }
816   return 0;
817 }
818 
819 void CompactibleFreeListSpace::set_end(HeapWord* value) {
820   HeapWord* prevEnd = end();
821   assert(prevEnd != value, "unnecessary set_end call");
822   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
823         "New end is below unallocated block");
824   _end = value;
825   if (prevEnd != NULL) {
826     // Resize the underlying block offset table.
827     _bt.resize(pointer_delta(value, bottom()));
828     if (value <= prevEnd) {
829       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
830              "New end is below unallocated block");
831     } else {
832       // Now, take this new chunk and add it to the free blocks.
833       // Note that the BOT has not yet been updated for this block.
834       size_t newFcSize = pointer_delta(value, prevEnd);
835       // Add the block to the free lists, if possible coalescing it
836       // with the last free block, and update the BOT and census data.
837       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
838     }
839   }
840 }
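// Note: expansion (value > prevEnd) hands the newly covered words to the free
// lists as a single chunk, possibly coalescing it with the last free block;
// shrinking only re-checks that the new end does not cut into the unallocated
// block tracked by the BOT.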
841 
842 class FreeListSpaceDCTOC : public FilteringDCTOC {
843   CompactibleFreeListSpace* _cfls;
844   CMSCollector* _collector;
845   bool _parallel;
846 protected:
847   // Override.
848 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
849   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
850                                        HeapWord* bottom, HeapWord* top, \
851                                        ClosureType* cl);                \
852       void walk_mem_region_with_cl_par(MemRegion mr,                    \
853                                        HeapWord* bottom, HeapWord* top, \
854                                        ClosureType* cl);                \
855     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
856                                        HeapWord* bottom, HeapWord* top, \
857                                        ClosureType* cl)
858   walk_mem_region_with_cl_DECL(OopIterateClosure);
859   walk_mem_region_with_cl_DECL(FilteringClosure);
860 
861 public:
862   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
863                      CMSCollector* collector,
864                      OopIterateClosure* cl,
865                      CardTable::PrecisionStyle precision,
866                      HeapWord* boundary,
867                      bool parallel) :
868     FilteringDCTOC(sp, cl, precision, boundary),
869     _cfls(sp), _collector(collector), _parallel(parallel) {}
870 };
871 
872 // We de-virtualize the block-related calls below, since we know that our
873 // space is a CompactibleFreeListSpace.
874 
875 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
876 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
877                                                  HeapWord* bottom,              \
878                                                  HeapWord* top,                 \
879                                                  ClosureType* cl) {             \
880    if (_parallel) {                                                             \
881      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
882    } else {                                                                     \
883      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
884    }                                                                            \
885 }                                                                               \
886 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
887                                                      HeapWord* bottom,          \
888                                                      HeapWord* top,             \
889                                                      ClosureType* cl) {         \
890   /* Skip parts that are before "mr", in case "block_start" sent us             \
891      back too far. */                                                           \
892   HeapWord* mr_start = mr.start();                                              \
893   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
894   HeapWord* next = bottom + bot_size;                                           \
895   while (next < mr_start) {                                                     \
896     bottom = next;                                                              \
897     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
898     next = bottom + bot_size;                                                   \
899   }                                                                             \
900                                                                                 \
901   while (bottom < top) {                                                        \
902     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
903         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
904                     oop(bottom)) &&                                             \
905         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
906       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
907       bottom += _cfls->adjustObjectSize(word_sz);                               \
908     } else {                                                                    \
909       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
910     }                                                                           \
911   }                                                                             \
912 }                                                                               \
913 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
914                                                        HeapWord* bottom,        \
915                                                        HeapWord* top,           \
916                                                        ClosureType* cl) {       \
917   /* Skip parts that are before "mr", in case "block_start" sent us             \
918      back too far. */                                                           \
919   HeapWord* mr_start = mr.start();                                              \
920   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
921   HeapWord* next = bottom + bot_size;                                           \
922   while (next < mr_start) {                                                     \
923     bottom = next;                                                              \
924     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
925     next = bottom + bot_size;                                                   \
926   }                                                                             \
927                                                                                 \
928   while (bottom < top) {                                                        \
929     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
930         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
931                     oop(bottom)) &&                                             \
932         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
933       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
934       bottom += _cfls->adjustObjectSize(word_sz);                               \
935     } else {                                                                    \
936       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
937     }                                                                           \
938   }                                                                             \
939 }
940 
941 // (There are only two of these, rather than N, because the split is due
942 // only to the introduction of the FilteringClosure, a local part of the
943 // impl of this abstraction.)
944 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
945 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
946 
947 DirtyCardToOopClosure*
948 CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
949                                       CardTable::PrecisionStyle precision,
950                                       HeapWord* boundary,
951                                       bool parallel) {
952   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
953 }
954 
955 
956 // Note on locking for the space iteration functions:
957 // since the collector's iteration activities are concurrent with
958 // allocation activities by mutators, absent a suitable mutual exclusion
959 // mechanism the iterators may go awry. For instance a block being iterated
960 // may suddenly be allocated or divided up and part of it allocated and
961 // so on.
962 
963 // Apply the given closure to each block in the space.
964 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
965   assert_lock_strong(freelistLock());
966   HeapWord *cur, *limit;
967   for (cur = bottom(), limit = end(); cur < limit;
968        cur += cl->do_blk_careful(cur));
969 }
970 
971 // Apply the given closure to each block in the space.
972 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
973   assert_lock_strong(freelistLock());
974   HeapWord *cur, *limit;
975   for (cur = bottom(), limit = end(); cur < limit;
976        cur += cl->do_blk(cur));
977 }
978 
979 // Apply the given closure to each oop in the space.
980 void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
981   assert_lock_strong(freelistLock());
982   HeapWord *cur, *limit;
983   size_t curSize;
984   for (cur = bottom(), limit = end(); cur < limit;
985        cur += curSize) {
986     curSize = block_size(cur);
987     if (block_is_obj(cur)) {
988       oop(cur)->oop_iterate(cl);
989     }
990   }
991 }
992 
993 // NOTE: In the following methods, in order to safely be able to
994 // apply the closure to an object, we need to be sure that the
995 // object has been initialized. We are guaranteed that an object
996 // is initialized if we are holding the Heap_lock with the
997 // world stopped.
998 void CompactibleFreeListSpace::verify_objects_initialized() const {
999   if (is_init_completed()) {
1000     assert_locked_or_safepoint(Heap_lock);
1001     if (Universe::is_fully_initialized()) {
1002       guarantee(SafepointSynchronize::is_at_safepoint(),
1003                 "Required for objects to be initialized");
1004     }
1005   } // else make a concession at vm start-up
1006 }
1007 
1008 // Apply the given closure to each object in the space
1009 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
1010   assert_lock_strong(freelistLock());
1011   NOT_PRODUCT(verify_objects_initialized());
1012   HeapWord *cur, *limit;
1013   size_t curSize;
1014   for (cur = bottom(), limit = end(); cur < limit;
1015        cur += curSize) {
1016     curSize = block_size(cur);
1017     if (block_is_obj(cur)) {
1018       blk->do_object(oop(cur));
1019     }
1020   }
1021 }
1022 
1023 // Apply the given closure to each live object in the space
1024 //   The usage of CompactibleFreeListSpace
1025 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
1026 // objects in the space with references to objects that are no longer
1027 // valid.  For example, an object may reference another object
1028 // that has already been swept up (collected).  This method uses
1029 // obj_is_alive() to determine whether it is safe to apply the closure to
1030 // an object.  See obj_is_alive() for details on how liveness of an
1031 // object is decided.
1032 
1033 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
1034   assert_lock_strong(freelistLock());
1035   NOT_PRODUCT(verify_objects_initialized());
1036   HeapWord *cur, *limit;
1037   size_t curSize;
1038   for (cur = bottom(), limit = end(); cur < limit;
1039        cur += curSize) {
1040     curSize = block_size(cur);
1041     if (block_is_obj(cur) && obj_is_alive(cur)) {
1042       blk->do_object(oop(cur));
1043     }
1044   }
1045 }
1046 
1047 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
1048                                                   UpwardsObjectClosure* cl) {
1049   assert_locked(freelistLock());
1050   NOT_PRODUCT(verify_objects_initialized());
1051   assert(!mr.is_empty(), "Should be non-empty");
1052   // We use MemRegion(bottom(), end()) rather than used_region() below
1053   // because the two are not necessarily equal for some kinds of
1054   // spaces, in particular, certain kinds of free list spaces.
1055   // We could use the more complicated but more precise:
1056   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
1057   // but the slight imprecision seems acceptable in the assertion check.
1058   assert(MemRegion(bottom(), end()).contains(mr),
1059          "Should be within used space");
1060   HeapWord* prev = cl->previous();   // max address from last time
1061   if (prev >= mr.end()) { // nothing to do
1062     return;
1063   }
1064   // This assert will not work when we go from cms space to perm
1065   // space, and use same closure. Easy fix deferred for later. XXX YSR
1066   // assert(prev == NULL || contains(prev), "Should be within space");
1067 
1068   bool last_was_obj_array = false;
1069   HeapWord *blk_start_addr, *region_start_addr;
1070   if (prev > mr.start()) {
1071     region_start_addr = prev;
1072     blk_start_addr    = prev;
1073     // The previous invocation may have pushed "prev" beyond the
1074     // last allocated block yet there may still be blocks
1075     // in this region due to a particular coalescing policy.
1076     // Relax the assertion so that the case where the unallocated
1077     // block is maintained and "prev" is beyond the unallocated
1078     // block does not cause the assertion to fire.
1079     assert((BlockOffsetArrayUseUnallocatedBlock &&
1080             (!is_in(prev))) ||
1081            (blk_start_addr == block_start(region_start_addr)), "invariant");
1082   } else {
1083     region_start_addr = mr.start();
1084     blk_start_addr    = block_start(region_start_addr);
1085   }
1086   HeapWord* region_end_addr = mr.end();
1087   MemRegion derived_mr(region_start_addr, region_end_addr);
1088   while (blk_start_addr < region_end_addr) {
1089     const size_t size = block_size(blk_start_addr);
1090     if (block_is_obj(blk_start_addr)) {
1091       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
1092     } else {
1093       last_was_obj_array = false;
1094     }
1095     blk_start_addr += size;
1096   }
1097   if (!last_was_obj_array) {
1098     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
1099            "Should be within (closed) used space");
1100     assert(blk_start_addr > prev, "Invariant");
1101     cl->set_previous(blk_start_addr); // min address for next time
1102   }
1103 }
1104 
1105 // Callers of this iterator beware: The closure application should
1106 // be robust in the face of uninitialized objects and should (always)
1107 // return a correct size so that the next addr + size below gives us a
1108 // valid block boundary. [See for instance,
1109 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
1110 // in ConcurrentMarkSweepGeneration.cpp.]
1111 HeapWord*
1112 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
1113   ObjectClosureCareful* cl) {
1114   assert_lock_strong(freelistLock());
1115   // Can't use used_region() below because it may not necessarily
1116   // be the same as [bottom(),end()); although we could
1117   // use [used_region().start(),align_up(used_region().end(),CardSize)),
1118   // that appears too cumbersome, so we just do the simpler check
1119   // in the assertion below.
1120   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
1121          "mr should be non-empty and within used space");
1122   HeapWord *addr, *end;
1123   size_t size;
1124   for (addr = block_start_careful(mr.start()), end  = mr.end();
1125        addr < end; addr += size) {
1126     FreeChunk* fc = (FreeChunk*)addr;
1127     if (fc->is_free()) {
1128       // Since we hold the free list lock, which protects direct
1129       // allocation in this generation by mutators, a free object
1130       // will remain free throughout this iteration code.
1131       size = fc->size();
1132     } else {
1133       // Note that the object need not necessarily be initialized,
1134       // because (for instance) the free list lock does NOT protect
1135       // object initialization. The closure application below must
1136       // therefore be correct in the face of uninitialized objects.
1137       size = cl->do_object_careful_m(oop(addr), mr);
1138       if (size == 0) {
1139         // An unparsable object was found. Signal early termination.
1140         return addr;
1141       }
1142     }
1143   }
1144   return NULL;
1145 }
1146 
1147 
1148 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
1149   NOT_PRODUCT(verify_objects_initialized());
1150   return _bt.block_start(p);
1151 }
1152 
1153 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
1154   return _bt.block_start_careful(p);
1155 }
1156 
1157 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
1158   NOT_PRODUCT(verify_objects_initialized());
1159   // This must be volatile, or else there is a danger that the compiler
1160   // will compile the code below into a sometimes-infinite loop, by keeping
1161   // the value read the first time in a register.
1162   while (true) {
1163     // We must do this until we get a consistent view of the object.
1164     if (FreeChunk::indicatesFreeChunk(p)) {
1165       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1166       size_t res = fc->size();
1167 
1168       // Bugfix for systems with weak memory model (PPC64/IA64). The
1169       // block's free bit was set and we have read the size of the
1170       // block. Acquire and check the free bit again. If the block is
1171       // still free, the read size is correct.
1172       OrderAccess::acquire();
1173 
1174       // If the object is still a free chunk, return the size, else it
1175       // has been allocated so try again.
1176       if (FreeChunk::indicatesFreeChunk(p)) {
1177         assert(res != 0, "Block size should not be 0");
1178         return res;
1179       }
1180     } else {
1181       // The barrier is required to prevent reordering of the free chunk check
1182       // and the klass read.
1183       OrderAccess::loadload();
1184 
1185       // Ensure klass read before size.
1186       Klass* k = oop(p)->klass_or_null_acquire();
1187       if (k != NULL) {
1188         assert(k->is_klass(), "Should really be klass oop.");
1189         oop o = (oop)p;
1190         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1191 
1192         size_t res = o->size_given_klass(k);
1193         res = adjustObjectSize(res);
1194         assert(res != 0, "Block size should not be 0");
1195         return res;
1196       }
1197     }
1198   }
1199 }
1200 
1201 // TODO: Now that is_parsable is gone, we should combine these two functions.
1202 // A variant of the above that uses the Printezis bits for
1203 // unparsable but allocated objects. This avoids any possible
1204 // stalls waiting for mutators to initialize objects, and is
1205 // thus potentially faster than the variant above. However,
1206 // this variant may return a zero size for a block that is
1207 // under mutation and for which a consistent size cannot be
1208 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
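// (Background, stated roughly: during a concurrent cycle CMS records a
// pair of marks in its marking bit map that bracket such an uninitialized
// block, so block_size_if_printezis_bits() can infer the block's extent
// without reading the object's header; see that method for the details.)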
1209 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1210                                                      const CMSCollector* c)
1211 const {
1212   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1213   // This must be volatile, or else there is a danger that the compiler
1214   // will compile the code below into a sometimes-infinite loop, by keeping
1215   // the value read the first time in a register.
1216   DEBUG_ONLY(uint loops = 0;)
1217   while (true) {
1218     // We must do this until we get a consistent view of the object.
1219     if (FreeChunk::indicatesFreeChunk(p)) {
1220       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1221       size_t res = fc->size();
1222 
1223       // Bugfix for systems with weak memory model (PPC64/IA64). The
1224       // free bit of the block was set and we have read the size of
1225       // the block. Acquire and check the free bit again. If the
1226       // block is still free, the read size is correct.
1227       OrderAccess::acquire();
1228 
1229       if (FreeChunk::indicatesFreeChunk(p)) {
1230         assert(res != 0, "Block size should not be 0");
1231         assert(loops == 0, "Should be 0");
1232         return res;
1233       }
1234     } else {
1235       // The barrier is required to prevent reordering of the free chunk check
1236       // and the klass read.
1237       OrderAccess::loadload();
1238 
1239       // Ensure klass read before size.
1240       Klass* k = oop(p)->klass_or_null_acquire();
1241       if (k != NULL) {
1242         assert(k->is_klass(), "Should really be klass oop.");
1243         oop o = (oop)p;
1244         assert(oopDesc::is_oop(o), "Should be an oop");
1245 
1246         size_t res = o->size_given_klass(k);
1247         res = adjustObjectSize(res);
1248         assert(res != 0, "Block size should not be 0");
1249         return res;
1250       } else {
1251         // May return 0 if P-bits not present.
1252         return c->block_size_if_printezis_bits(p);
1253       }
1254     }
1255     assert(loops == 0, "Can loop at most once");
1256     DEBUG_ONLY(loops++;)
1257   }
1258 }
1259 
1260 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1261   NOT_PRODUCT(verify_objects_initialized());
1262   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1263   FreeChunk* fc = (FreeChunk*)p;
1264   if (fc->is_free()) {
1265     return fc->size();
1266   } else {
1267     // Ignore mark word because this may be a recently promoted
1268     // object whose mark word is used to chain together grey
1269     // objects (the last one would have a null value).
1270     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1271     return adjustObjectSize(oop(p)->size());
1272   }
1273 }
1274 
1275 // This implementation assumes that the property of "being an object" is
1276 // stable.  But being a free chunk may not be (because of parallel
1277 // promotion.)
1278 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1279   FreeChunk* fc = (FreeChunk*)p;
1280   assert(is_in_reserved(p), "Should be in space");
1281   if (FreeChunk::indicatesFreeChunk(p)) return false;
1282 
1283   // The barrier is required to prevent reordering of the free chunk check
1284   // and the klass read.
1285   OrderAccess::loadload();
1286 
1287   Klass* k = oop(p)->klass_or_null_acquire();
1288   if (k != NULL) {
1289     // Ignore mark word because it may have been used to
1290     // chain together promoted objects (the last one
1291     // would have a null value).
1292     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1293     return true;
1294   } else {
1295     return false;  // Was not an object at the start of collection.
1296   }
1297 }
1298 
1299 // Check if the object is alive. This fact is checked either by consulting
1300 // the main marking bitmap in the sweeping phase or, if it's a permanent
1301 // generation and we're not in the sweeping phase, by checking the
1302 // perm_gen_verify_bit_map where we store the "deadness" information if
1303 // we did not sweep the perm gen in the most recent previous GC cycle.
1304 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1305   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1306          "Else races are possible");
1307   assert(block_is_obj(p), "The address should point to an object");
1308 
1309   // If we're sweeping, we use object liveness information from the main bit map
1310   // for both perm gen and old gen.
1311   // We don't need to lock the bitmap (live_map or dead_map below), because
1312   // EITHER we are in the middle of the sweeping phase, and the
1313   // main marking bit map (live_map below) is locked,
1314   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1315   // is stable, because it's mutated only in the sweeping phase.
1316   // NOTE: This method is also used by jmap where, if class unloading is
1317   // off, the results can return "false" for legitimate perm objects,
1318   // when we are not in the midst of a sweeping phase, which can result
1319   // in jmap not reporting certain perm gen objects. This will be moot
1320   // if/when the perm gen goes away in the future.
1321   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1322     CMSBitMap* live_map = _collector->markBitMap();
1323     return live_map->par_isMarked((HeapWord*) p);
1324   }
1325   return true;
1326 }
1327 
1328 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1329   FreeChunk* fc = (FreeChunk*)p;
1330   assert(is_in_reserved(p), "Should be in space");
1331   assert(_bt.block_start(p) == p, "Should be a block boundary");
1332   if (!fc->is_free()) {
1333     // Ignore mark word because it may have been used to
1334     // chain together promoted objects (the last one
1335     // would have a null value).
1336     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1337     return true;
1338   }
1339   return false;
1340 }
1341 
1342 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1343 // approximate answer if you don't hold the free list lock when you call this.
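// The value returned is the sum over all indexed sizes i of
// i * _indexedFreeList[i].count(), expressed in heap words.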
1344 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1345   size_t size = 0;
1346   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1347     debug_only(
1348       // We may be calling here without the lock in which case we
1349       // won't do this modest sanity check.
1350       if (freelistLock()->owned_by_self()) {
1351         size_t total_list_size = 0;
1352         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1353           fc = fc->next()) {
1354           total_list_size += i;
1355         }
1356         assert(total_list_size == i * _indexedFreeList[i].count(),
1357                "Count in list is incorrect");
1358       }
1359     )
1360     size += i * _indexedFreeList[i].count();
1361   }
1362   return size;
1363 }
1364 
1365 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1366   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1367   return allocate(size);
1368 }
1369 
1370 HeapWord*
1371 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1372   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1373 }
1374 
1375 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1376   assert_lock_strong(freelistLock());
1377   HeapWord* res = NULL;
1378   assert(size == adjustObjectSize(size),
1379          "use adjustObjectSize() before calling into allocate()");
1380 
1381   res = allocate_adaptive_freelists(size);
1382 
1383   if (res != NULL) {
1384     // check that res does lie in this space!
1385     assert(is_in_reserved(res), "Not in this space!");
1386     assert(is_aligned((void*)res), "alignment check");
1387 
1388     FreeChunk* fc = (FreeChunk*)res;
1389     fc->markNotFree();
1390     assert(!fc->is_free(), "shouldn't be marked free");
1391     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1392     // Verify that the block offset table shows this to
1393     // be a single block, but not one which is unallocated.
1394     _bt.verify_single_block(res, size);
1395     _bt.verify_not_unallocated(res, size);
1396     // mangle a just allocated object with a distinct pattern.
1397     debug_only(fc->mangleAllocated(size));
1398   }
1399 
1400   // During GC we do not need to recalculate the stable used value for
1401   // every allocation in old gen. It is done once at the end of GC instead
1402   // for performance reasons.
1403   if (!CMSHeap::heap()->is_gc_active()) {
1404     recalculate_used_stable();
1405   }
1406 
1407   return res;
1408 }
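// Illustrative, hedged usage sketch for allocate() above (hypothetical
// caller, not a call site in this file): a caller holding freelistLock()
// would do roughly
//
//   size_t adjusted = cfls->adjustObjectSize(word_size);
//   HeapWord* mem   = cfls->allocate(adjusted);
//   if (mem == NULL) { /* fall back, possibly expanding or collecting */ }
//
// where 'cfls' names this CompactibleFreeListSpace.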
1409 
1410 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1411   assert_lock_strong(freelistLock());
1412   HeapWord* res = NULL;
1413   assert(size == adjustObjectSize(size),
1414          "use adjustObjectSize() before calling into allocate()");
1415 
1416   // Strategy
1417   //   if small
1418   //     exact size from small object indexed list if small
1419   //     small or large linear allocation block (linAB) as appropriate
1420   //     take from lists of greater sized chunks
1421   //   else
1422   //     dictionary
1423   //     small or large linear allocation block if it has the space
1424   // Try allocating exact size from indexTable first
1425   if (size < IndexSetSize) {
1426     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1427     if(res != NULL) {
1428       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1429         "Not removed from free list");
1430       // no block offset table adjustment is necessary on blocks in
1431       // the indexed lists.
1432 
1433     // Try allocating from the small LinAB
1434     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1435         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1436         // if successful, the above also adjusts block offset table
1437         // Note that this call will refill the LinAB to
1438         // satisfy the request.  This is different from
1439         // evm.
1440         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1441     } else {
1442       // Raid the exact free lists larger than size, even if they are not
1443       // overpopulated.
1444       res = (HeapWord*) getChunkFromGreater(size);
1445     }
1446   } else {
1447     // Big objects get allocated directly from the dictionary.
1448     res = (HeapWord*) getChunkFromDictionaryExact(size);
1449     if (res == NULL) {
1450       // Try hard not to fail since an allocation failure will likely
1451       // trigger a synchronous GC.  Try to get the space from the
1452       // allocation blocks.
1453       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1454     }
1455   }
1456 
1457   return res;
1458 }
1459 
1460 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1461 // when promoting obj.
1462 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1463   // Depending on the object size, expansion may require refilling either a
1464   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1465   // is added because the dictionary may over-allocate to avoid fragmentation.
1466   size_t space = obj_size;
1467   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1468   return space;
1469 }
1470 
1471 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1472   FreeChunk* ret;
1473 
1474   assert(numWords >= MinChunkSize, "Size is less than minimum");
1475   assert(linearAllocationWouldFail() || bestFitFirst(),
1476     "Should not be here");
1477 
1478   size_t i;
1479   size_t currSize = numWords + MinChunkSize;
1480   assert(is_object_aligned(currSize), "currSize should be aligned");
1481   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1482     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1483     if (fl->head()) {
1484       ret = getFromListGreater(fl, numWords);
1485       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1486       return ret;
1487     }
1488   }
1489 
1490   currSize = MAX2((size_t)SmallForDictionary,
1491                   (size_t)(numWords + MinChunkSize));
1492 
1493   /* Try to get a chunk that satisfies request, while avoiding
1494      fragmentation that can't be handled. */
1495   {
1496     ret =  dictionary()->get_chunk(currSize);
1497     if (ret != NULL) {
1498       assert(ret->size() - numWords >= MinChunkSize,
1499              "Chunk is too small");
1500       _bt.allocated((HeapWord*)ret, ret->size());
1501       /* Carve returned chunk. */
1502       (void) splitChunkAndReturnRemainder(ret, numWords);
1503       /* Label this as no longer a free chunk. */
1504       assert(ret->is_free(), "This chunk should be free");
1505       ret->link_prev(NULL);
1506     }
1507     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1508     return ret;
1509   }
1510   ShouldNotReachHere();
1511 }
1512 
1513 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1514   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1515   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1516 }
1517 
1518 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1519   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1520          (_smallLinearAllocBlock._word_size == fc->size()),
1521          "Linear allocation block shows incorrect size");
1522   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1523           (_smallLinearAllocBlock._word_size == fc->size()));
1524 }
1525 
1526 // Check if the purported free chunk is present either as a linear
1527 // allocation block, the size-indexed table of (smaller) free blocks,
1528 // or the larger free blocks kept in the binary tree dictionary.
1529 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1530   if (verify_chunk_is_linear_alloc_block(fc)) {
1531     return true;
1532   } else if (fc->size() < IndexSetSize) {
1533     return verifyChunkInIndexedFreeLists(fc);
1534   } else {
1535     return dictionary()->verify_chunk_in_free_list(fc);
1536   }
1537 }
1538 
1539 #ifndef PRODUCT
1540 void CompactibleFreeListSpace::assert_locked() const {
1541   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1542 }
1543 
1544 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1545   CMSLockVerifier::assert_locked(lock);
1546 }
1547 #endif
1548 
1549 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1550   // In the parallel case, the main thread holds the free list lock
1551   // on behalf of the parallel threads.
1552   FreeChunk* fc;
1553   {
1554     // If GC is parallel, this might be called by several threads.
1555     // This should be rare enough that the locking overhead won't affect
1556     // the sequential code.
1557     MutexLockerEx x(parDictionaryAllocLock(),
1558                     Mutex::_no_safepoint_check_flag);
1559     fc = getChunkFromDictionary(size);
1560   }
1561   if (fc != NULL) {
1562     fc->dontCoalesce();
1563     assert(fc->is_free(), "Should be free, but not coalescable");
1564     // Verify that the block offset table shows this to
1565     // be a single block, but not one which is unallocated.
1566     _bt.verify_single_block((HeapWord*)fc, fc->size());
1567     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1568   }
1569   return fc;
1570 }
1571 
1572 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1573   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1574   assert_locked();
1575 
1576   // if we are tracking promotions, then first ensure space for
1577   // promotion (including spooling space for saving header if necessary).
1578   // then allocate and copy, then track promoted info if needed.
1579   // When tracking (see PromotionInfo::track()), the mark word may
1580   // be displaced and in this case restoration of the mark word
1581   // occurs in the (oop_since_save_marks_)iterate phase.
1582   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1583     return NULL;
1584   }
1585   // Call the allocate(size_t, bool) form directly to avoid the
1586   // additional call through the allocate(size_t) form.  Having
1587   // the compiler inline the call is problematic because allocate(size_t)
1588   // is a virtual method.
1589   HeapWord* res = allocate(adjustObjectSize(obj_size));
1590   if (res != NULL) {
1591     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1592     // if we should be tracking promotions, do so.
1593     if (_promoInfo.tracking()) {
1594         _promoInfo.track((PromotedObject*)res);
1595     }
1596   }
1597   return oop(res);
1598 }
1599 
1600 HeapWord*
1601 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1602   assert_locked();
1603   assert(size >= MinChunkSize, "minimum chunk size");
1604   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1605     "maximum from smallLinearAllocBlock");
1606   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1607 }
1608 
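// Attempts to carve 'size' words out of the given linear allocation block,
// refilling the block from the indexed free lists or the dictionary when
// its current remainder is too small to satisfy the request.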
1609 HeapWord*
1610 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1611                                                        size_t size) {
1612   assert_locked();
1613   assert(size >= MinChunkSize, "too small");
1614   HeapWord* res = NULL;
1615   // Try to do linear allocation from blk, making sure the block has not been exhausted.
1616   if (blk->_word_size == 0) {
1617     // We have probably been unable to fill this either in the prologue or
1618     // when it was exhausted at the last linear allocation. Bail out until
1619     // next time.
1620     assert(blk->_ptr == NULL, "consistency check");
1621     return NULL;
1622   }
1623   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1624   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1625   if (res != NULL) return res;
1626 
1627   // about to exhaust this linear allocation block
1628   if (blk->_word_size == size) { // exactly satisfied
1629     res = blk->_ptr;
1630     _bt.allocated(res, blk->_word_size);
1631   } else if (size + MinChunkSize <= blk->_refillSize) {
1632     size_t sz = blk->_word_size;
1633     // Update _unallocated_block if the size is such that chunk would be
1634     // returned to the indexed free list.  All other chunks in the indexed
1635     // free lists are allocated from the dictionary so that _unallocated_block
1636     // has already been adjusted for them.  Do it here so that the cost
1637     // is incurred for all chunks added back to the indexed free lists.
1638     if (sz < SmallForDictionary) {
1639       _bt.allocated(blk->_ptr, sz);
1640     }
1641     // Return the chunk that isn't big enough, and then refill below.
1642     addChunkToFreeLists(blk->_ptr, sz);
1643     split_birth(sz);
1644     // Don't keep statistics on adding back chunk from a LinAB.
1645   } else {
1646     // A refilled block would not satisfy the request.
1647     return NULL;
1648   }
1649 
1650   blk->_ptr = NULL; blk->_word_size = 0;
1651   refillLinearAllocBlock(blk);
1652   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1653          "block was replenished");
1654   if (res != NULL) {
1655     split_birth(size);
1656     repairLinearAllocBlock(blk);
1657   } else if (blk->_ptr != NULL) {
1658     res = blk->_ptr;
1659     size_t blk_size = blk->_word_size;
1660     blk->_word_size -= size;
1661     blk->_ptr  += size;
1662     split_birth(size);
1663     repairLinearAllocBlock(blk);
1664     // Update BOT last so that other (parallel) GC threads see a consistent
1665     // view of the BOT and free blocks.
1666     // Above must occur before BOT is updated below.
1667     OrderAccess::storestore();
1668     _bt.split_block(res, blk_size, size);  // adjust block offset table
1669   }
1670   return res;
1671 }
1672 
1673 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1674                                         LinearAllocBlock* blk,
1675                                         size_t size) {
1676   assert_locked();
1677   assert(size >= MinChunkSize, "too small");
1678 
1679   HeapWord* res = NULL;
1680   // This is the common case.  Keep it simple.
1681   if (blk->_word_size >= size + MinChunkSize) {
1682     assert(blk->_ptr != NULL, "consistency check");
1683     res = blk->_ptr;
1684     // Note that the BOT is up-to-date for the linAB before allocation.  It
1685     // indicates the start of the linAB.  The split_block() updates the
1686     // BOT for the linAB after the allocation (indicates the start of the
1687     // next chunk to be allocated).
1688     size_t blk_size = blk->_word_size;
1689     blk->_word_size -= size;
1690     blk->_ptr  += size;
1691     split_birth(size);
1692     repairLinearAllocBlock(blk);
1693     // Update BOT last so that other (parallel) GC threads see a consistent
1694     // view of the BOT and free blocks.
1695     // Above must occur before BOT is updated below.
1696     OrderAccess::storestore();
1697     _bt.split_block(res, blk_size, size);  // adjust block offset table
1698     _bt.allocated(res, size);
1699   }
1700   return res;
1701 }
1702 
1703 FreeChunk*
1704 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1705   assert_locked();
1706   assert(size < SmallForDictionary, "just checking");
1707   FreeChunk* res;
1708   res = _indexedFreeList[size].get_chunk_at_head();
1709   if (res == NULL) {
1710     res = getChunkFromIndexedFreeListHelper(size);
1711   }
1712   _bt.verify_not_unallocated((HeapWord*) res, size);
1713   assert(res == NULL || res->size() == size, "Incorrect block size");
1714   return res;
1715 }
1716 
1717 FreeChunk*
1718 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1719   bool replenish) {
1720   assert_locked();
1721   FreeChunk* fc = NULL;
1722   if (size < SmallForDictionary) {
1723     assert(_indexedFreeList[size].head() == NULL ||
1724       _indexedFreeList[size].surplus() <= 0,
1725       "List for this size should be empty or under populated");
1726     // Try best fit in exact lists before replenishing the list
1727     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1728       // Replenish list.
1729       //
1730       // Things tried that failed.
1731       //   Tried allocating out of the two LinAB's first before
1732       // replenishing lists.
1733       //   Tried small linAB of size 256 (size in indexed list)
1734       // and replenishing indexed lists from the small linAB.
1735       //
1736       FreeChunk* newFc = NULL;
1737       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1738       if (replenish_size < SmallForDictionary) {
1739         // Do not replenish from an underpopulated size.
1740         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1741             _indexedFreeList[replenish_size].head() != NULL) {
1742           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1743         } else if (bestFitFirst()) {
1744           newFc = bestFitSmall(replenish_size);
1745         }
1746       }
1747       if (newFc == NULL && replenish_size > size) {
1748         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1749         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1750       }
1751       // Note: The stats update re split-death of block obtained above
1752       // will be recorded below precisely when we know we are going to
1753       // be actually splitting it into more than one piece below.
1754       if (newFc != NULL) {
1755         if  (replenish || CMSReplenishIntermediate) {
1756           // Replenish this list and return one block to caller.
1757           size_t i;
1758           FreeChunk *curFc, *nextFc;
1759           size_t num_blk = newFc->size() / size;
1760           assert(num_blk >= 1, "Smaller than requested?");
1761           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1762           if (num_blk > 1) {
1763             // we are sure we will be splitting the block just obtained
1764             // into multiple pieces; record the split-death of the original
1765             splitDeath(replenish_size);
1766           }
1767           // carve up and link blocks 0, ..., num_blk - 2
1768           // The last chunk is not added to the lists but is returned as the
1769           // free chunk.
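          // Illustrative example (assuming the default
          // CMSIndexedFreeListReplenish == 4): a chunk of 4*size words is
          // carved into four size-word blocks; the first three are queued
          // on _indexedFreeList[size] below and the fourth is returned.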
1770           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1771                i = 0;
1772                i < (num_blk - 1);
1773                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1774                i++) {
1775             curFc->set_size(size);
1776             // Don't record this as a return in order to try and
1777             // determine the "returns" from a GC.
1778             _bt.verify_not_unallocated((HeapWord*) fc, size);
1779             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1780             _bt.mark_block((HeapWord*)curFc, size);
1781             split_birth(size);
1782             // Don't record the initial population of the indexed list
1783             // as a split birth.
1784           }
1785 
1786           // check that the arithmetic was OK above
1787           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1788             "inconsistency in carving newFc");
1789           curFc->set_size(size);
1790           _bt.mark_block((HeapWord*)curFc, size);
1791           split_birth(size);
1792           fc = curFc;
1793         } else {
1794           // Return entire block to caller
1795           fc = newFc;
1796         }
1797       }
1798     }
1799   } else {
1800     // Get a free chunk from the free chunk dictionary to be returned to
1801     // replenish the indexed free list.
1802     fc = getChunkFromDictionaryExact(size);
1803   }
1804   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1805   return fc;
1806 }
1807 
1808 FreeChunk*
1809 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1810   assert_locked();
1811   FreeChunk* fc = _dictionary->get_chunk(size);
1812   if (fc == NULL) {
1813     return NULL;
1814   }
1815   _bt.allocated((HeapWord*)fc, fc->size());
1816   if (fc->size() >= size + MinChunkSize) {
1817     fc = splitChunkAndReturnRemainder(fc, size);
1818   }
1819   assert(fc->size() >= size, "chunk too small");
1820   assert(fc->size() < size + MinChunkSize, "chunk too big");
1821   _bt.verify_single_block((HeapWord*)fc, fc->size());
1822   return fc;
1823 }
1824 
1825 FreeChunk*
1826 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1827   assert_locked();
1828   FreeChunk* fc = _dictionary->get_chunk(size);
1829   if (fc == NULL) {
1830     return fc;
1831   }
1832   _bt.allocated((HeapWord*)fc, fc->size());
1833   if (fc->size() == size) {
1834     _bt.verify_single_block((HeapWord*)fc, size);
1835     return fc;
1836   }
1837   assert(fc->size() > size, "get_chunk() guarantee");
1838   if (fc->size() < size + MinChunkSize) {
1839     // Return the chunk to the dictionary and go get a bigger one.
1840     returnChunkToDictionary(fc);
1841     fc = _dictionary->get_chunk(size + MinChunkSize);
1842     if (fc == NULL) {
1843       return NULL;
1844     }
1845     _bt.allocated((HeapWord*)fc, fc->size());
1846   }
1847   assert(fc->size() >= size + MinChunkSize, "tautology");
1848   fc = splitChunkAndReturnRemainder(fc, size);
1849   assert(fc->size() == size, "chunk is wrong size");
1850   _bt.verify_single_block((HeapWord*)fc, size);
1851   return fc;
1852 }
1853 
1854 void
1855 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1856   assert_locked();
1857 
1858   size_t size = chunk->size();
1859   _bt.verify_single_block((HeapWord*)chunk, size);
1860   // adjust _unallocated_block downward, as necessary
1861   _bt.freed((HeapWord*)chunk, size);
1862   _dictionary->return_chunk(chunk);
1863 #ifndef PRODUCT
1864   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1865     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1866     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1867     tl->verify_stats();
1868   }
1869 #endif // PRODUCT
1870 }
1871 
1872 void
1873 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1874   assert_locked();
1875   size_t size = fc->size();
1876   _bt.verify_single_block((HeapWord*) fc, size);
1877   _bt.verify_not_unallocated((HeapWord*) fc, size);
1878   _indexedFreeList[size].return_chunk_at_tail(fc);
1879 #ifndef PRODUCT
1880   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1881      _indexedFreeList[size].verify_stats();
1882   }
1883 #endif // PRODUCT
1884 }
1885 
1886 // Add chunk to end of last block -- if it's the largest
1887 // block -- and update BOT and census data. We would
1888 // of course have preferred to coalesce it with the
1889 // last block, but it's currently less expensive to find the
1890 // largest block than it is to find the last.
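// If the dictionary's current largest block happens to end exactly where
// 'chunk' begins, the two are coalesced under the dictionary lock before
// the combined block is re-added and a coalesced birth is recorded.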
1891 void
1892 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1893   HeapWord* chunk, size_t     size) {
1894   // check that the chunk does lie in this space!
1895   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1896   // One of the parallel gc task threads may be here
1897   // whilst others are allocating.
1898   Mutex* lock = &_parDictionaryAllocLock;
1899   FreeChunk* ec;
1900   {
1901     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1902     ec = dictionary()->find_largest_dict();  // get largest block
1903     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1904       // It's a coterminal block - we can coalesce.
1905       size_t old_size = ec->size();
1906       coalDeath(old_size);
1907       removeChunkFromDictionary(ec);
1908       size += old_size;
1909     } else {
1910       ec = (FreeChunk*)chunk;
1911     }
1912   }
1913   ec->set_size(size);
1914   debug_only(ec->mangleFreed(size));
1915   if (size < SmallForDictionary) {
1916     lock = _indexedFreeListParLocks[size];
1917   }
1918   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1919   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1920   // record the birth under the lock since the recording involves
1921   // manipulation of the list on which the chunk lives and
1922   // if the chunk is allocated and is the last on the list,
1923   // the list can go away.
1924   coalBirth(size);
1925 }
1926 
1927 void
1928 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1929                                               size_t     size) {
1930   // check that the chunk does lie in this space!
1931   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1932   assert_locked();
1933   _bt.verify_single_block(chunk, size);
1934 
1935   FreeChunk* fc = (FreeChunk*) chunk;
1936   fc->set_size(size);
1937   debug_only(fc->mangleFreed(size));
1938   if (size < SmallForDictionary) {
1939     returnChunkToFreeList(fc);
1940   } else {
1941     returnChunkToDictionary(fc);
1942   }
1943 }
1944 
1945 void
1946 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1947   size_t size, bool coalesced) {
1948   assert_locked();
1949   assert(chunk != NULL, "null chunk");
1950   if (coalesced) {
1951     // repair BOT
1952     _bt.single_block(chunk, size);
1953   }
1954   addChunkToFreeLists(chunk, size);
1955 }
1956 
1957 // We _must_ find the purported chunk on our free lists;
1958 // we assert if we don't.
1959 void
1960 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1961   size_t size = fc->size();
1962   assert_locked();
1963   debug_only(verifyFreeLists());
1964   if (size < SmallForDictionary) {
1965     removeChunkFromIndexedFreeList(fc);
1966   } else {
1967     removeChunkFromDictionary(fc);
1968   }
1969   _bt.verify_single_block((HeapWord*)fc, size);
1970   debug_only(verifyFreeLists());
1971 }
1972 
1973 void
1974 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1975   size_t size = fc->size();
1976   assert_locked();
1977   assert(fc != NULL, "null chunk");
1978   _bt.verify_single_block((HeapWord*)fc, size);
1979   _dictionary->remove_chunk(fc);
1980   // adjust _unallocated_block upward, as necessary
1981   _bt.allocated((HeapWord*)fc, size);
1982 }
1983 
1984 void
1985 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1986   assert_locked();
1987   size_t size = fc->size();
1988   _bt.verify_single_block((HeapWord*)fc, size);
1989   NOT_PRODUCT(
1990     if (FLSVerifyIndexTable) {
1991       verifyIndexedFreeList(size);
1992     }
1993   )
1994   _indexedFreeList[size].remove_chunk(fc);
1995   NOT_PRODUCT(
1996     if (FLSVerifyIndexTable) {
1997       verifyIndexedFreeList(size);
1998     }
1999   )
2000 }
2001 
2002 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
2003   /* A hint is the next larger size that has a surplus.
2004      Start search at a size large enough to guarantee that
2005      the excess is >= MinChunkSize. */
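  /* Illustrative example: starting at 'start', the hints form a chain of
     successively larger sizes; the first list on that chain with both a
     surplus and a non-empty head is split via getFromListGreater(), and
     the hint at 'start' is reset to point directly at that size. */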
2006   size_t start = align_object_size(numWords + MinChunkSize);
2007   if (start < IndexSetSize) {
2008     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
2009     size_t    hint = _indexedFreeList[start].hint();
2010     while (hint < IndexSetSize) {
2011       assert(is_object_aligned(hint), "hint should be aligned");
2012       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
2013       if (fl->surplus() > 0 && fl->head() != NULL) {
2014         // Found a list with surplus, reset original hint
2015         // and split out a free chunk which is returned.
2016         _indexedFreeList[start].set_hint(hint);
2017         FreeChunk* res = getFromListGreater(fl, numWords);
2018         assert(res == NULL || res->is_free(),
2019           "Should be returning a free chunk");
2020         return res;
2021       }
2022       hint = fl->hint(); /* keep looking */
2023     }
2024     /* None found. */
2025     it[start].set_hint(IndexSetSize);
2026   }
2027   return NULL;
2028 }
2029 
2030 /* Requires fl->size >= numWords + MinChunkSize */
2031 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
2032   size_t numWords) {
2033   FreeChunk *curr = fl->head();
2034   assert(numWords >= MinChunkSize, "Word size is too small");
2035   assert(curr != NULL, "List is empty");
2036   size_t oldNumWords = curr->size();
2037   assert(oldNumWords >= numWords + MinChunkSize,
2038         "Size of chunks in the list is too small");
2039 
2040   fl->remove_chunk(curr);
2041   // recorded indirectly by splitChunkAndReturnRemainder -
2042   // smallSplit(oldNumWords, numWords);
2043   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
2044   // Does anything have to be done for the remainder in terms of
2045   // fixing the card table?
2046   assert(new_chunk == NULL || new_chunk->is_free(),
2047     "Should be returning a free chunk");
2048   return new_chunk;
2049 }
2050 
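// Splits 'chunk' in place: the front 'new_size' words are resized and
// handed back to the caller, while the remainder is linked onto the
// indexed free lists or the dictionary according to its size; e.g. a
// 100-word chunk split with new_size == 40 yields a 40-word return value
// and a 60-word remainder placed back on the free lists.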
2051 FreeChunk*
2052 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
2053   size_t new_size) {
2054   assert_locked();
2055   size_t size = chunk->size();
2056   assert(size > new_size, "Split from a smaller block?");
2057   assert(is_aligned(chunk), "alignment problem");
2058   assert(size == adjustObjectSize(size), "alignment problem");
2059   size_t rem_sz = size - new_size;
2060   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
2061   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
2062   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
2063   assert(is_aligned(ffc), "alignment problem");
2064   ffc->set_size(rem_sz);
2065   ffc->link_next(NULL);
2066   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2067   // Above must occur before BOT is updated below.
2068   // adjust block offset table
2069   OrderAccess::storestore();
2070   assert(chunk->is_free() && ffc->is_free(), "Error");
2071   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
2072   if (rem_sz < SmallForDictionary) {
2073     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
2074     bool is_par = Thread::current()->is_GC_task_thread();
2075     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
2076     returnChunkToFreeList(ffc);
2077     split(size, rem_sz);
2078     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
2079   } else {
2080     returnChunkToDictionary(ffc);
2081     split(size, rem_sz);
2082   }
2083   chunk->set_size(new_size);
2084   return chunk;
2085 }
2086 
2087 void
2088 CompactibleFreeListSpace::sweep_completed() {
2089   // Now that space is probably plentiful, refill linear
2090   // allocation blocks as needed.
2091   refillLinearAllocBlocksIfNeeded();
2092 }
2093 
2094 void
2095 CompactibleFreeListSpace::gc_prologue() {
2096   assert_locked();
2097   reportFreeListStatistics("Before GC:");
2098   refillLinearAllocBlocksIfNeeded();
2099 }
2100 
2101 void
2102 CompactibleFreeListSpace::gc_epilogue() {
2103   assert_locked();
2104   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2105   _promoInfo.stopTrackingPromotions();
2106   repairLinearAllocationBlocks();
2107   reportFreeListStatistics("After GC:");
2108 }
2109 
2110 // Iteration support, mostly delegated from a CMS generation
2111 
2112 void CompactibleFreeListSpace::save_marks() {
2113   assert(Thread::current()->is_VM_thread(),
2114          "Global variable should only be set when single-threaded");
2115   // Mark the "end" of the used space at the time of this call;
2116   // note, however, that promoted objects from this point
2117   // on are tracked in the _promoInfo below.
2118   set_saved_mark_word(unallocated_block());
2119 #ifdef ASSERT
2120   // Check the sanity of save_marks() etc.
2121   MemRegion ur    = used_region();
2122   MemRegion urasm = used_region_at_save_marks();
2123   assert(ur.contains(urasm),
2124          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2125          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2126          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
2127 #endif
2128   // inform allocator that promotions should be tracked.
2129   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2130   _promoInfo.startTrackingPromotions();
2131 }
2132 
2133 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2134   assert(_promoInfo.tracking(), "No preceding save_marks?");
2135   return _promoInfo.noPromotions();
2136 }
2137 
2138 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2139   return _smallLinearAllocBlock._word_size == 0;
2140 }
2141 
2142 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2143   // Fix up linear allocation blocks to look like free blocks
2144   repairLinearAllocBlock(&_smallLinearAllocBlock);
2145 }
2146 
2147 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2148   assert_locked();
2149   if (blk->_ptr != NULL) {
2150     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2151            "Minimum block size requirement");
2152     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2153     fc->set_size(blk->_word_size);
2154     fc->link_prev(NULL);   // mark as free
2155     fc->dontCoalesce();
2156     assert(fc->is_free(), "just marked it free");
2157     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2158   }
2159 }
2160 
2161 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2162   assert_locked();
2163   if (_smallLinearAllocBlock._ptr == NULL) {
2164     assert(_smallLinearAllocBlock._word_size == 0,
2165       "Size of linAB should be zero if the ptr is NULL");
2166     // Reset the linAB refill and allocation size limit.
2167     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2168   }
2169   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2170 }
2171 
2172 void
2173 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2174   assert_locked();
2175   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2176          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2177          "blk invariant");
2178   if (blk->_ptr == NULL) {
2179     refillLinearAllocBlock(blk);
2180   }
2181 }
2182 
2183 void
2184 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2185   assert_locked();
2186   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2187          "linear allocation block should be empty");
2188   FreeChunk* fc;
2189   if (blk->_refillSize < SmallForDictionary &&
2190       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2191     // A linAB's strategy might be to use small sizes to reduce
2192     // fragmentation but still get the benefits of allocation from a
2193     // linAB.
2194   } else {
2195     fc = getChunkFromDictionary(blk->_refillSize);
2196   }
2197   if (fc != NULL) {
2198     blk->_ptr  = (HeapWord*)fc;
2199     blk->_word_size = fc->size();
2200     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2201   }
2202 }
2203 
2204 // Support for compaction
2205 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2206   scan_and_forward(this, cp);
2207   // Prepare_for_compaction() uses the space between live objects
2208   // so that the later phase can skip dead space quickly.  So verification
2209   // of the free lists doesn't work after.
2210 }
2211 
2212 void CompactibleFreeListSpace::adjust_pointers() {
2213   // In other versions of adjust_pointers(), a bail out
2214   // based on the amount of live data in the generation
2215   // (i.e., if 0, bail out) may be used.
2216   // Cannot test used() == 0 here because the free lists have already
2217   // been mangled by the compaction.
2218 
2219   scan_and_adjust_pointers(this);
2220   // See note about verification in prepare_for_compaction().
2221 }
2222 
2223 void CompactibleFreeListSpace::compact() {
2224   scan_and_compact(this);
2225 }
2226 
2227 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2228 // where fbs is free block sizes
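// For example, a single 100-word free block gives 1 - 100^2/100^2 = 0.0,
// while ten 10-word free blocks give 1 - (10*10^2)/100^2 = 0.9; values
// closer to 1.0 therefore indicate a more fragmented free space.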
2229 double CompactibleFreeListSpace::flsFrag() const {
2230   size_t itabFree = totalSizeInIndexedFreeLists();
2231   double frag = 0.0;
2232   size_t i;
2233 
2234   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2235     double sz  = i;
2236     frag      += _indexedFreeList[i].count() * (sz * sz);
2237   }
2238 
2239   double totFree = itabFree +
2240                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2241   if (totFree > 0) {
2242     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2243             (totFree * totFree));
2244     frag = (double)1.0  - frag;
2245   } else {
2246     assert(frag == 0.0, "Follows from totFree == 0");
2247   }
2248   return frag;
2249 }
2250 
2251 void CompactibleFreeListSpace::beginSweepFLCensus(
2252   float inter_sweep_current,
2253   float inter_sweep_estimate,
2254   float intra_sweep_estimate) {
2255   assert_locked();
2256   size_t i;
2257   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2258     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2259     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2260     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2261     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2262     fl->set_before_sweep(fl->count());
2263     fl->set_bfr_surp(fl->surplus());
2264   }
2265   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2266                                     inter_sweep_current,
2267                                     inter_sweep_estimate,
2268                                     intra_sweep_estimate);
2269 }
2270 
2271 void CompactibleFreeListSpace::setFLSurplus() {
2272   assert_locked();
2273   size_t i;
2274   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2275     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2276     fl->set_surplus(fl->count() -
2277                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2278   }
2279 }
2280 
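// Walks the indexed lists from largest to smallest, pointing each list's
// hint at the nearest strictly larger size that currently has a surplus
// (or at IndexSetSize if none does); bestFitSmall() follows this chain.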
2281 void CompactibleFreeListSpace::setFLHints() {
2282   assert_locked();
2283   size_t i;
2284   size_t h = IndexSetSize;
2285   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2286     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2287     fl->set_hint(h);
2288     if (fl->surplus() > 0) {
2289       h = i;
2290     }
2291   }
2292 }
2293 
2294 void CompactibleFreeListSpace::clearFLCensus() {
2295   assert_locked();
2296   size_t i;
2297   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2298     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2299     fl->set_prev_sweep(fl->count());
2300     fl->set_coal_births(0);
2301     fl->set_coal_deaths(0);
2302     fl->set_split_births(0);
2303     fl->set_split_deaths(0);
2304   }
2305 }
2306 
2307 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2308   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2309   setFLSurplus();
2310   setFLHints();
2311   printFLCensus(sweep_count);
2312   clearFLCensus();
2313   assert_locked();
2314   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2315 }
2316 
2317 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2318   if (size < SmallForDictionary) {
2319     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2320     return (fl->coal_desired() < 0) ||
2321            ((int)fl->count() > fl->coal_desired());
2322   } else {
2323     return dictionary()->coal_dict_over_populated(size);
2324   }
2325 }
2326 
2327 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2328   assert(size < SmallForDictionary, "Size too large for indexed list");
2329   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2330   fl->increment_coal_births();
2331   fl->increment_surplus();
2332 }
2333 
2334 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2335   assert(size < SmallForDictionary, "Size too large for indexed list");
2336   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2337   fl->increment_coal_deaths();
2338   fl->decrement_surplus();
2339 }
2340 
2341 void CompactibleFreeListSpace::coalBirth(size_t size) {
2342   if (size  < SmallForDictionary) {
2343     smallCoalBirth(size);
2344   } else {
2345     dictionary()->dict_census_update(size,
2346                                    false /* split */,
2347                                    true /* birth */);
2348   }
2349 }
2350 
2351 void CompactibleFreeListSpace::coalDeath(size_t size) {
2352   if(size  < SmallForDictionary) {
2353     smallCoalDeath(size);
2354   } else {
2355     dictionary()->dict_census_update(size,
2356                                    false /* split */,
2357                                    false /* birth */);
2358   }
2359 }
2360 
2361 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2362   assert(size < SmallForDictionary, "Size too large for indexed list");
2363   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2364   fl->increment_split_births();
2365   fl->increment_surplus();
2366 }
2367 
2368 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2369   assert(size < SmallForDictionary, "Size too large for indexed list");
2370   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2371   fl->increment_split_deaths();
2372   fl->decrement_surplus();
2373 }
2374 
2375 void CompactibleFreeListSpace::split_birth(size_t size) {
2376   if (size  < SmallForDictionary) {
2377     smallSplitBirth(size);
2378   } else {
2379     dictionary()->dict_census_update(size,
2380                                    true /* split */,
2381                                    true /* birth */);
2382   }
2383 }
2384 
2385 void CompactibleFreeListSpace::splitDeath(size_t size) {
2386   if (size  < SmallForDictionary) {
2387     smallSplitDeath(size);
2388   } else {
2389     dictionary()->dict_census_update(size,
2390                                    true /* split */,
2391                                    false /* birth */);
2392   }
2393 }
2394 
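// Census bookkeeping for splitting a block of size 'from' into pieces of
// sizes 'to1' and 'from - to1'; e.g. split(100, 40) records a split death
// for size 100 and split births for sizes 40 and 60.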
2395 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2396   size_t to2 = from - to1;
2397   splitDeath(from);
2398   split_birth(to1);
2399   split_birth(to2);
2400 }
2401 
2402 void CompactibleFreeListSpace::print() const {
2403   print_on(tty);
2404 }
2405 
2406 void CompactibleFreeListSpace::prepare_for_verify() {
2407   assert_locked();
2408   repairLinearAllocationBlocks();
2409   // Verify that the SpoolBlocks look like free blocks of
2410   // appropriate sizes... To be done ...
2411 }
2412 
2413 class VerifyAllBlksClosure: public BlkClosure {
2414  private:
2415   const CompactibleFreeListSpace* _sp;
2416   const MemRegion                 _span;
2417   HeapWord*                       _last_addr;
2418   size_t                          _last_size;
2419   bool                            _last_was_obj;
2420   bool                            _last_was_live;
2421 
2422  public:
2423   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2424     MemRegion span) :  _sp(sp), _span(span),
2425                        _last_addr(NULL), _last_size(0),
2426                        _last_was_obj(false), _last_was_live(false) { }
2427 
2428   virtual size_t do_blk(HeapWord* addr) {
2429     size_t res;
2430     bool   was_obj  = false;
2431     bool   was_live = false;
2432     if (_sp->block_is_obj(addr)) {
2433       was_obj = true;
2434       oop p = oop(addr);
2435       guarantee(oopDesc::is_oop(p), "Should be an oop");
2436       res = _sp->adjustObjectSize(p->size());
2437       if (_sp->obj_is_alive(addr)) {
2438         was_live = true;
2439         p->verify();
2440       }
2441     } else {
2442       FreeChunk* fc = (FreeChunk*)addr;
2443       res = fc->size();
2444       if (FLSVerifyLists && !fc->cantCoalesce()) {
2445         guarantee(_sp->verify_chunk_in_free_list(fc),
2446                   "Chunk should be on a free list");
2447       }
2448     }
2449     if (res == 0) {
2450       Log(gc, verify) log;
2451       log.error("Livelock: no rank reduction!");
2452       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2453                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2454         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2455         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2456       LogStream ls(log.error());
2457       _sp->print_on(&ls);
2458       guarantee(false, "Verification failed.");
2459     }
2460     _last_addr = addr;
2461     _last_size = res;
2462     _last_was_obj  = was_obj;
2463     _last_was_live = was_live;
2464     return res;
2465   }
2466 };
2467 
2468 class VerifyAllOopsClosure: public BasicOopIterateClosure {
2469  private:
2470   const CMSCollector*             _collector;
2471   const CompactibleFreeListSpace* _sp;
2472   const MemRegion                 _span;
2473   const bool                      _past_remark;
2474   const CMSBitMap*                _bit_map;
2475 
2476  protected:
2477   void do_oop(void* p, oop obj) {
2478     if (_span.contains(obj)) { // the interior oop points into CMS heap
2479       if (!_span.contains(p)) { // reference from outside CMS heap
2480         // Should be a valid object; the first disjunct below allows
2481         // us to sidestep an assertion in block_is_obj() that insists
2482         // that p be in _sp. Note that several generations (and spaces)
2483         // are spanned by _span (CMS heap) above.
2484         guarantee(!_sp->is_in_reserved(obj) ||
2485                   _sp->block_is_obj((HeapWord*)obj),
2486                   "Should be an object");
2487         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2488         obj->verify();
2489         if (_past_remark) {
2490           // Remark has been completed, the object should be marked
2491           _bit_map->isMarked((HeapWord*)obj);
2492         }
2493       } else { // reference within CMS heap
2494         if (_past_remark) {
2495           // Remark has been completed -- so the referent should have
2496           // been marked, if referring object is.
2497           if (_bit_map->isMarked(_collector->block_start(p))) {
2498             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2499           }
2500         }
2501       }
2502     } else if (_sp->is_in_reserved(p)) {
2503       // the reference is from FLS, and points out of FLS
2504       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2505       obj->verify();
2506     }
2507   }
2508 
2509   template <class T> void do_oop_work(T* p) {
2510     T heap_oop = RawAccess<>::oop_load(p);
2511     if (!CompressedOops::is_null(heap_oop)) {
2512       oop obj = CompressedOops::decode_not_null(heap_oop);
2513       do_oop(p, obj);
2514     }
2515   }
2516 
2517  public:
2518   VerifyAllOopsClosure(const CMSCollector* collector,
2519     const CompactibleFreeListSpace* sp, MemRegion span,
2520     bool past_remark, CMSBitMap* bit_map) :
2521     _collector(collector), _sp(sp), _span(span),
2522     _past_remark(past_remark), _bit_map(bit_map) { }
2523 
2524   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2525   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2526 };
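// VerifyAllOopsClosure (above) cross-checks references against the CMS span:
// oops that point into the span from outside must be valid objects (and,
// after the remark phase, marked in the bit map); references within the span
// must not lead from a marked block to an unmarked object; and references
// that originate in this space but point elsewhere must still be valid oops.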
2527 
2528 void CompactibleFreeListSpace::verify() const {
2529   assert_lock_strong(&_freelistLock);
2530   verify_objects_initialized();
2531   MemRegion span = _collector->_span;
2532   bool past_remark = (_collector->abstract_state() ==
2533                       CMSCollector::Sweeping);
2534 
2535   ResourceMark rm;
2536   HandleMark  hm;
2537 
2538   // Check integrity of CFL data structures
2539   _promoInfo.verify();
2540   _dictionary->verify();
2541   if (FLSVerifyIndexTable) {
2542     verifyIndexedFreeLists();
2543   }
2544   // Check integrity of all objects and free blocks in space
2545   {
2546     VerifyAllBlksClosure cl(this, span);
2547     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2548   }
2549   // Check that all references from the rest of the heap into FLS
2550   // point to valid objects in FLS, and that references from FLS
2551   // point to valid objects elsewhere in the heap.
2552   if (FLSVerifyAllHeapReferences)
2553   {
2554     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2555       _collector->markBitMap());
2556 
2557     // Iterate over all oops in the heap.
2558     CMSHeap::heap()->oop_iterate(&cl);
2559   }
2560 
2561   if (VerifyObjectStartArray) {
2562     // Verify the block offset table
2563     _bt.verify();
2564   }
2565 }
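// The more expensive checks above are gated by VM flags: FLSVerifyIndexTable
// (indexed free lists), FLSVerifyAllHeapReferences (a full heap oop
// iteration), and VerifyObjectStartArray (block offset table).  verify()
// itself must be called with _freelistLock held, as asserted at entry.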
2566 
2567 #ifndef PRODUCT
2568 void CompactibleFreeListSpace::verifyFreeLists() const {
2569   if (FLSVerifyLists) {
2570     _dictionary->verify();
2571     verifyIndexedFreeLists();
2572   } else {
2573     if (FLSVerifyDictionary) {
2574       _dictionary->verify();
2575     }
2576     if (FLSVerifyIndexTable) {
2577       verifyIndexedFreeLists();
2578     }
2579   }
2580 }
2581 #endif
2582 
2583 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2584   size_t i = 0;
2585   for (; i < IndexSetStart; i++) {
2586     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2587   }
2588   for (; i < IndexSetSize; i++) {
2589     verifyIndexedFreeList(i);
2590   }
2591 }
2592 
2593 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2594   FreeChunk* fc   =  _indexedFreeList[size].head();
2595   FreeChunk* tail =  _indexedFreeList[size].tail();
2596   size_t    num = _indexedFreeList[size].count();
2597   size_t      n = 0;
2598   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2599             "Slot should have been empty");
2600   for (; fc != NULL; fc = fc->next(), n++) {
2601     guarantee(fc->size() == size, "Size inconsistency");
2602     guarantee(fc->is_free(), "!free?");
2603     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2604     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2605   }
2606   guarantee(n == num, "Incorrect count");
2607 }
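// Invariants checked per indexed list (slot "size") above: every chunk on the
// list has exactly that size and is marked free, the doubly-linked structure
// is consistent, the recorded tail is the last element, and the cached count
// matches the number of chunks actually walked.  Slots below IndexSetStart,
// or whose size is not a multiple of IndexSetStride, must stay empty.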
2608 
2609 #ifndef PRODUCT
2610 void CompactibleFreeListSpace::check_free_list_consistency() const {
2611   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2612     "Some sizes can't be allocated without recourse to"
2613     " linear allocation buffers");
2614   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2615     "else MIN_TREE_CHUNK_SIZE is wrong");
2616   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2617   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2618 }
2619 #endif
2620 
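// printFLCensus() prints one row of AdaptiveFreeList statistics per indexed
// size class plus a TOTAL row, followed by the dictionary census.  The
// summary line computes growth = (split_births + coal_births - split_deaths
// - coal_deaths) / prev_sweep and deficit = (desired - count) / desired,
// guarding against zero denominators.  Output goes to the unified logging
// target (gc, freelist, census) at Debug level, e.g. (assuming the usual
// -Xlog selector syntax) -Xlog:gc+freelist+census=debug.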
2621 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2622   assert_lock_strong(&_freelistLock);
2623   LogTarget(Debug, gc, freelist, census) log;
2624   if (!log.is_enabled()) {
2625     return;
2626   }
2627   AdaptiveFreeList<FreeChunk> total;
2628   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2629   ResourceMark rm;
2630   LogStream ls(log);
2631   outputStream* out = &ls;
2632   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2633   size_t total_free = 0;
2634   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2635     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2636     total_free += fl->count() * fl->size();
2637     if (i % (40*IndexSetStride) == 0) {
2638       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2639     }
2640     fl->print_on(out);
2641     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2642     total.set_surplus(    total.surplus()     + fl->surplus()    );
2643     total.set_desired(    total.desired()     + fl->desired()    );
2644     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2645     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2646     total.set_count(      total.count()       + fl->count()      );
2647     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2648     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2649     total.set_split_births(total.split_births() + fl->split_births());
2650     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2651   }
2652   total.print_on(out, "TOTAL");
2653   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2654   log.print("growth: %8.5f  deficit: %8.5f",
2655             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2656                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2657             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2658   _dictionary->print_dict_census(out);
2659 }
2660 
2661 ///////////////////////////////////////////////////////////////////////////
2662 // CompactibleFreeListSpaceLAB
2663 ///////////////////////////////////////////////////////////////////////////
2664 
2665 #define VECTOR_257(x)                                                                                  \
2666   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2667   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2668      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2669      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2670      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2671      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2672      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2673      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2674      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2675      x }
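// VECTOR_257() expands to a 257-element initializer, one slot per indexed
// size class; the constructor below asserts that IndexSetSize is still 257,
// so the macro must be kept in sync if that constant ever changes.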
2676 
2677 // Initialize with the default setting for CMS, _not_ the
2678 // generic OldPLABSize, whose static default is different; if overridden on the
2679 // command line, this will get reinitialized via a call to
2680 // modify_initialization() below.
2681 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2682   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2683 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2684 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2685 
2686 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2687   _cfls(cfls)
2688 {
2689   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2690   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2691        i < CompactibleFreeListSpace::IndexSetSize;
2692        i += CompactibleFreeListSpace::IndexSetStride) {
2693     _indexedFreeList[i].set_size(i);
2694     _num_blocks[i] = 0;
2695   }
2696 }
2697 
2698 static bool _CFLS_LAB_modified = false;
2699 
2700 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2701   assert(!_CFLS_LAB_modified, "Call only once");
2702   _CFLS_LAB_modified = true;
2703   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2704        i < CompactibleFreeListSpace::IndexSetSize;
2705        i += CompactibleFreeListSpace::IndexSetStride) {
2706     _blocks_to_claim[i].modify(n, wt, true /* force */);
2707   }
2708 }
2709 
2710 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2711   FreeChunk* res;
2712   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2713   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2714     // This locking manages sync with other large object allocations.
2715     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2716                     Mutex::_no_safepoint_check_flag);
2717     res = _cfls->getChunkFromDictionaryExact(word_sz);
2718     if (res == NULL) return NULL;
2719   } else {
2720     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2721     if (fl->count() == 0) {
2722       // Attempt to refill this local free list.
2723       get_from_global_pool(word_sz, fl);
2724       // If it didn't work, give up.
2725       if (fl->count() == 0) return NULL;
2726     }
2727     res = fl->get_chunk_at_head();
2728     assert(res != NULL, "Why was count non-zero?");
2729   }
2730   res->markNotFree();
2731   assert(!res->is_free(), "shouldn't be marked free");
2732   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2733   // mangle a just allocated object with a distinct pattern.
2734   debug_only(res->mangleAllocated(word_sz));
2735   return (HeapWord*)res;
2736 }
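// Allocation path summary for alloc() above: requests of IndexSetSize words
// or more go to the shared dictionary under parDictionaryAllocLock(); smaller
// requests are served from this LAB's private indexed free list, refilled
// from the global pool via get_from_global_pool() when it runs dry.  The
// returned chunk is marked not-free and, in debug builds, mangled.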
2737 
2738 // Get a chunk of blocks of the right size and update related
2739 // book-keeping stats
2740 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2741   // Get the #blocks we want to claim
2742   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2743   assert(n_blks > 0, "Error");
2744   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2745   // In some cases, when the application has a phase change,
2746   // there may be a sudden and sharp shift in the object survival
2747   // profile, and updating the counts at the end of a scavenge
2748   // may not be quick enough, giving rise to large scavenge pauses
2749   // during these phase changes. It is beneficial to detect such
2750   // changes on-the-fly during a scavenge and avoid such a phase-change
2751   // pothole. The following code is a heuristic attempt to do that.
2752   // It is protected by a product flag until we have gained
2753   // enough experience with this heuristic and fine-tuned its behavior.
2754   // WARNING: This might increase fragmentation if we overreact to
2755   // small spikes, so some kind of historical smoothing based on
2756   // previous experience with the greater reactivity might be useful.
2757   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2758   // default.
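  // A hypothetical worked example (illustrative numbers only): with
  // n_blks == 16, CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4
  // and _num_blocks[word_sz] == 1024, "multiple" below evaluates to
  // 1024 / (4.0 * 4 * 16) == 4, so n_blks grows by
  // CMSOldPLABReactivityFactor * 4 * 16 before being capped at CMSOldPLABMax.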
2759   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2760     //
2761     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2762     // which is why there is a cast to double.
2763     //
2764     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2765     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2766     n_blks = MIN2(n_blks, CMSOldPLABMax);
2767   }
2768   assert(n_blks > 0, "Error");
2769   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2770   // Update stats table entry for this block size
2771   _num_blocks[word_sz] += fl->count();
2772 }
2773 
2774 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2775   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2776        i < CompactibleFreeListSpace::IndexSetSize;
2777        i += CompactibleFreeListSpace::IndexSetStride) {
2778     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2779            "Counter inconsistency");
2780     if (_global_num_workers[i] > 0) {
2781       // Need to smooth wrt historical average
2782       if (ResizeOldPLAB) {
2783         _blocks_to_claim[i].sample(
2784           MAX2(CMSOldPLABMin,
2785           MIN2(CMSOldPLABMax,
2786                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2787       }
2788       // Reset counters for next round
2789       _global_num_workers[i] = 0;
2790       _global_num_blocks[i] = 0;
2791       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2792     }
2793   }
2794 }
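// compute_desired_plab_size() feeds the per-size refill target: the blocks
// used per worker per refill (global blocks / global workers /
// CMSOldPLABNumRefills), clamped to [CMSOldPLABMin, CMSOldPLABMax], is
// sampled into the AdaptiveWeightedAverage _blocks_to_claim[i], whose
// smoothing weight is OldPLABWeight.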
2795 
2796 // If this is changed in the future to allow parallel
2797 // access, one would need to take the FL locks and,
2798 // depending on how it is used, stagger access from
2799 // parallel threads to reduce contention.
2800 void CompactibleFreeListSpaceLAB::retire(int tid) {
2801   // We run this single threaded with the world stopped;
2802   // so no need for locks and such.
2803   NOT_PRODUCT(Thread* t = Thread::current();)
2804   assert(Thread::current()->is_VM_thread(), "Error");
2805   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2806        i < CompactibleFreeListSpace::IndexSetSize;
2807        i += CompactibleFreeListSpace::IndexSetStride) {
2808     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2809            "Can't retire more than what we obtained");
2810     if (_num_blocks[i] > 0) {
2811       size_t num_retire =  _indexedFreeList[i].count();
2812       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2813       {
2814         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2815         //                Mutex::_no_safepoint_check_flag);
2816 
2817         // Update globals stats for num_blocks used
2818         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2819         _global_num_workers[i]++;
2820         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2821         if (num_retire > 0) {
2822           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2823           // Reset this list.
2824           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2825           _indexedFreeList[i].set_size(i);
2826         }
2827       }
2828       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2829                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2830       // Reset stats for next round
2831       _num_blocks[i]         = 0;
2832     }
2833   }
2834 }
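// retire() runs single-threaded with the world stopped (asserted above):
// unused chunks are prepended back onto the space's global indexed free
// lists, and the per-size usage counters are folded into _global_num_blocks /
// _global_num_workers for the next compute_desired_plab_size() round.
// Per-size retirement details are logged via the (gc, plab) trace target.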
2835 
2836 // Used by par_get_chunk_of_blocks() for the chunks from the
2837 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2838 // of "word_sz" and, if found, splits it into "word_sz" chunks and adds
2839 // them to the free list "fl".  "n" is the maximum number of chunks to
2840 // be added to "fl".
2841 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2842 
2843   // We'll try all multiples of word_sz in the indexed set, starting with
2844   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2845   // then try getting a big chunk and splitting it.
2846   {
2847     bool found;
2848     int  k;
2849     size_t cur_sz;
2850     for (k = 1, cur_sz = k * word_sz, found = false;
2851          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2852          (CMSSplitIndexedFreeListBlocks || k <= 1);
2853          k++, cur_sz = k * word_sz) {
2854       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2855       fl_for_cur_sz.set_size(cur_sz);
2856       {
2857         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2858                         Mutex::_no_safepoint_check_flag);
2859         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2860         if (gfl->count() != 0) {
2861           // nn is the number of chunks of size cur_sz that
2862           // we'd need to split k-ways each, in order to create
2863           // "n" chunks of size word_sz each.
2864           const size_t nn = MAX2(n/k, (size_t)1);
2865           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2866           found = true;
2867           if (k > 1) {
2868             // Update split death stats for the cur_sz-size blocks list:
2869             // we increment the split death count by the number of blocks
2870             // we just took from the cur_sz-size blocks list and which
2871             // we will be splitting below.
2872             ssize_t deaths = gfl->split_deaths() +
2873                              fl_for_cur_sz.count();
2874             gfl->set_split_deaths(deaths);
2875           }
2876         }
2877       }
2878       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2879       if (found) {
2880         if (k == 1) {
2881           fl->prepend(&fl_for_cur_sz);
2882         } else {
2883           // Divide each block on fl_for_cur_sz up k ways.
2884           FreeChunk* fc;
2885           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2886             // Must do this in reverse order, so that anybody attempting to
2887             // access the main chunk sees it as a single free block until we
2888             // change it.
2889             size_t fc_size = fc->size();
2890             assert(fc->is_free(), "Error");
2891             for (int i = k-1; i >= 0; i--) {
2892               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2893               assert((i != 0) ||
2894                         ((fc == ffc) && ffc->is_free() &&
2895                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2896                         "Counting error");
2897               ffc->set_size(word_sz);
2898               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2899               ffc->link_next(NULL);
2900               // Above must occur before BOT is updated below.
2901               OrderAccess::storestore();
2902               // splitting from the right, fc_size == i * word_sz
2903               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2904               fc_size -= word_sz;
2905               assert(fc_size == i*word_sz, "Error");
2906               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2907               _bt.verify_single_block((HeapWord*)fc, fc_size);
2908               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2909               // Push this on "fl".
2910               fl->return_chunk_at_head(ffc);
2911             }
2912             // TRAP
2913             assert(fl->tail()->next() == NULL, "List invariant.");
2914           }
2915         }
2916         // Update birth stats for this block size.
2917         size_t num = fl->count();
2918         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2919                         Mutex::_no_safepoint_check_flag);
2920         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2921         _indexedFreeList[word_sz].set_split_births(births);
2922         return true;
2923       }
2924     }
2925     return found;
2926   }
2927 }
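// A hypothetical example of the indexed-list path above (illustrative sizes
// only): for word_sz == 4 and k == 2, cur_sz == 8; if the global size-8 list
// is non-empty we take roughly n/2 of its chunks (at least one) and split
// each into two 4-word chunks, carving from the right so that a concurrent
// observer of the original chunk always sees a consistent free-block prefix.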
2928 
2929 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2930 
2931   FreeChunk* fc = NULL;
2932   FreeChunk* rem_fc = NULL;
2933   size_t rem;
2934   {
2935     MutexLockerEx x(parDictionaryAllocLock(),
2936                     Mutex::_no_safepoint_check_flag);
2937     while (n > 0) {
2938       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2939       if (fc != NULL) {
2940         break;
2941       } else {
2942         n--;
2943       }
2944     }
2945     if (fc == NULL) return NULL;
2946     // Otherwise, split up that block.
2947     assert((ssize_t)n >= 1, "Control point invariant");
2948     assert(fc->is_free(), "Error: should be a free block");
2949     _bt.verify_single_block((HeapWord*)fc, fc->size());
2950     const size_t nn = fc->size() / word_sz;
2951     n = MIN2(nn, n);
2952     assert((ssize_t)n >= 1, "Control point invariant");
2953     rem = fc->size() - n * word_sz;
2954     // If there is a remainder, and it's too small, allocate one fewer.
2955     if (rem > 0 && rem < MinChunkSize) {
2956       n--; rem += word_sz;
2957     }
2958     // Note that at this point we may have n == 0.
2959     assert((ssize_t)n >= 0, "Control point invariant");
2960 
2961     // If n is 0, the chunk fc that was found is not large
2962     // enough to leave a viable remainder.  We are unable to
2963     // allocate even one block.  Return fc to the
2964     // dictionary and return, leaving "fl" empty.
2965     if (n == 0) {
2966       returnChunkToDictionary(fc);
2967       return NULL;
2968     }
2969 
2970     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2971     dictionary()->dict_census_update(fc->size(),
2972                                      true /*split*/,
2973                                      false /*birth*/);
2974 
2975     // First return the remainder, if any.
2976     // Note that we hold the lock until we decide if we're going to give
2977     // back the remainder to the dictionary, since a concurrent allocation
2978     // may otherwise see the heap as empty.  (We're willing to take that
2979     // hit if the block is a small block.)
2980     if (rem > 0) {
2981       size_t prefix_size = n * word_sz;
2982       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2983       rem_fc->set_size(rem);
2984       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2985       rem_fc->link_next(NULL);
2986       // Above must occur before BOT is updated below.
2987       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2988       OrderAccess::storestore();
2989       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2990       assert(fc->is_free(), "Error");
2991       fc->set_size(prefix_size);
2992       if (rem >= IndexSetSize) {
2993         returnChunkToDictionary(rem_fc);
2994         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2995         rem_fc = NULL;
2996       }
2997       // Otherwise, return it to the small list below.
2998     }
2999   }
3000   if (rem_fc != NULL) {
3001     MutexLockerEx x(_indexedFreeListParLocks[rem],
3002                     Mutex::_no_safepoint_check_flag);
3003     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
3004     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
3005     smallSplitBirth(rem);
3006   }
3007   assert(n * word_sz == fc->size(),
3008          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
3009          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
3010          fc->size(), n, word_sz);
3011   return fc;
3012 }
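// Remainder handling in get_n_way_chunk_to_split() above: if the tail left
// over after carving n blocks would be smaller than MinChunkSize, one fewer
// block is taken so the remainder stays viable; a remainder of IndexSetSize
// words or more goes back to the dictionary, anything smaller is returned to
// the matching indexed free list (recording a small split birth).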
3013 
3014 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
3015 
3016   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
3017 
3018   if (fc == NULL) {
3019     return;
3020   }
3021 
3022   size_t n = fc->size() / word_sz;
3023 
3024   assert((ssize_t)n > 0, "Consistency");
3025   // Now do the splitting up.
3026   // Must do this in reverse order, so that anybody attempting to
3027   // access the main chunk sees it as a single free block until we
3028   // change it.
3029   size_t fc_size = n * word_sz;
3030   // All but first chunk in this loop
3031   for (ssize_t i = n-1; i > 0; i--) {
3032     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
3033     ffc->set_size(word_sz);
3034     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
3035     ffc->link_next(NULL);
3036     // Above must occur before BOT is updated below.
3037     OrderAccess::storestore();
3038     // Splitting from the right: before the decrement below, fc_size == (i + 1) * word_sz.
3039     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
3040     fc_size -= word_sz;
3041     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
3042     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
3043     _bt.verify_single_block((HeapWord*)fc, fc_size);
3044     // Push this on "fl".
3045     fl->return_chunk_at_head(ffc);
3046   }
3047   // First chunk
3048   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
3049   // The blocks above should show their new sizes before the first block below
3050   fc->set_size(word_sz);
3051   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
3052   fc->link_next(NULL);
3053   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
3054   _bt.verify_single_block((HeapWord*)fc, fc->size());
3055   fl->return_chunk_at_head(fc);
3056 
3057   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
3058   {
3059     // Update the stats for this block size.
3060     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
3061                     Mutex::_no_safepoint_check_flag);
3062     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
3063     _indexedFreeList[word_sz].set_split_births(births);
3064     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
3065     // _indexedFreeList[word_sz].set_surplus(new_surplus);
3066   }
3067 
3068   // TRAP
3069   assert(fl->tail()->next() == NULL, "List invariant.");
3070 }
3071 
3072 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
3073   assert(fl->count() == 0, "Precondition.");
3074   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
3075          "Precondition");
3076 
3077   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
3078     // Got it
3079     return;
3080   }
3081 
3082   // Otherwise, we'll split a block from the dictionary.
3083   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
3084 }
3085 
3086 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
3087   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
3088   return ergo_max;
3089 }
3090 
3091 // Set up the space's par_seq_tasks structure for work claiming
3092 // for parallel rescan. See CMSParRemarkTask where this is currently used.
3093 // XXX Need to suitably abstract and generalize this and the next
3094 // method into one.
3095 void
3096 CompactibleFreeListSpace::
3097 initialize_sequential_subtasks_for_rescan(int n_threads) {
3098   // The "size" of each task is fixed according to rescan_task_size.
3099   assert(n_threads > 0, "Unexpected n_threads argument");
3100   const size_t task_size = rescan_task_size();
3101   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
3102   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
3103   assert(n_tasks == 0 ||
3104          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3105           (used_region().start() + n_tasks*task_size >= used_region().end())),
3106          "n_tasks calculation incorrect");
3107   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3108   assert(!pst->valid(), "Clobbering existing data?");
3109   // Sets the condition for completion of the subtask (how many threads
3110   // need to finish in order to be done).
3111   pst->set_n_threads(n_threads);
3112   pst->set_n_tasks((int)n_tasks);
3113 }
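// Task sizing above is a ceiling division: n_tasks = ceil(used words /
// rescan_task_size()).  For example (hypothetical numbers), a 10,000-word
// used region with a 4,096-word task size yields ceil(10000/4096) == 3
// tasks, the last of which covers the partial tail.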
3114 
3115 // Set up the space's par_seq_tasks structure for work claiming
3116 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3117 void
3118 CompactibleFreeListSpace::
3119 initialize_sequential_subtasks_for_marking(int n_threads,
3120                                            HeapWord* low) {
3121   // The "size" of each task is fixed according to rescan_task_size.
3122   assert(n_threads > 0, "Unexpected n_threads argument");
3123   const size_t task_size = marking_task_size();
3124   assert(task_size > CardTable::card_size_in_words &&
3125          (task_size %  CardTable::card_size_in_words == 0),
3126          "Otherwise arithmetic below would be incorrect");
3127   MemRegion span = _old_gen->reserved();
3128   if (low != NULL) {
3129     if (span.contains(low)) {
3130       // Align low down to  a card boundary so that
3131       // we can use block_offset_careful() on span boundaries.
3132       HeapWord* aligned_low = align_down(low, CardTable::card_size);
3133       // Clip span prefix at aligned_low
3134       span = span.intersection(MemRegion(aligned_low, span.end()));
3135     } else if (low > span.end()) {
3136       span = MemRegion(low, low);  // Null region
3137     } // else use entire span
3138   }
3139   assert(span.is_empty() ||
3140          ((uintptr_t)span.start() %  CardTable::card_size == 0),
3141         "span should start at a card boundary");
3142   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3143   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3144   assert(n_tasks == 0 ||
3145          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3146           (span.start() + n_tasks*task_size >= span.end())),
3147          "n_tasks calculation incorrect");
3148   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3149   assert(!pst->valid(), "Clobbering existing data?");
3150   // Sets the condition for completion of the subtask (how many threads
3151   // need to finish in order to be done).
3152   pst->set_n_threads(n_threads);
3153   pst->set_n_tasks((int)n_tasks);
3154 }
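// As with rescan above, the marking span is divided into ceil(span words /
// marking_task_size()) tasks; aligning "low" down to a card boundary keeps
// block_offset_careful() usable at the span boundaries.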
3155