/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "logging/logStream.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // The leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently.
  // Normally, we find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because GC tends to free up lots
  // of regions at the beginning of the heap.
  //
  // The free set maintains mutator and collector views, and normally each allocates only in its
  // own view, unless one of the special cases for stealing and mixed allocations applies.
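  //
  // For illustration, assume a hypothetical 8-region heap with all regions free:
  //
  //   regions:   [0][1][2][3][4][5][6][7]
  //   mutator:    ---> scans left-to-right, fills from region 0
  //   collector:  <--- scans right-to-left, fills from region 7
  //
  // The two views grow toward each other from opposite ends of the heap.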

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned: iterate c over (leftmost, rightmost + 1] and use idx = c - 1,
      // which walks [leftmost, rightmost] in reverse without underflowing when leftmost == 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because update watermark
      // (URWM) moves due to GC allocations would expose unparsable mutator allocations.

      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  if (_heap->is_concurrent_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

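  // For illustration (hypothetical numbers): with ShenandoahElasticTLAB enabled, a TLAB
  // request of size = 2048 words against a region with only 1024 free words is shrunk to
  // 1024 words, and still succeeds as long as req.min_size() <= 1024. Without elastic
  // TLABs, the same request would fail here and the caller would try the next region.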
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc()) {
      r->set_update_watermark(r->top());
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // This seems a bit harsh, especially when this large allocation does not fit but the
    // next small one would. Still, keeping the region around risks inflating scan times
    // when lots of almost-full regions precede the fully-empty region where we want to
    // allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Move both mutator bounds inward until they land on a set bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Move both collector bounds inward until they land on a set bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find a contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.
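  //
  // For illustration (hypothetical bitmap), searching for num = 3 where usable regions
  // are marked 1: over 1 1 0 1 1 1, the window [beg; end] grows to [0; 1], hits the
  // unusable region 2 and resets to [3; 3], then grows to [3; 5], which is a match.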

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If regions are not adjacent, then the current [beg; end] is useless, and we may fast-forward.
    // If the region is not completely free, the current [beg; end] is useless, and we may fast-forward.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

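  // For illustration (hypothetical sizes, assuming 2 MB regions of 262144 words with
  // 8-byte heap words): a humongous request for 655360 words needs 3 regions, and the
  // mask computation below yields remainder = 655360 & (262144 - 1) = 131072 words,
  // so the trailing region ends up half full.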
  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This region would be recycled on the allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; verify we do not already hold it
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
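  // For illustration (hypothetical numbers): with a 1 GB heap, 2 MB regions, and
  // ShenandoahEvacReserve at its default of 5 (percent), to_reserve below comes out to
  // roughly 51 MB, so about 26 trailing regions get moved into the collector view.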
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

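      // For illustration (hypothetical numbers): with 4 mutator-free regions of 2 MB
      // each carrying 1 MB of used space on average, the internal fragmentation below
      // comes out as 100 * 1 MB / 2 MB = 50%.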
      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
 */
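/*
 * Worked example (hypothetical numbers): take k = 2 regions of capacity C = 100.
 * With one region full and one empty (case b), sum(used^2) = 100^2 = 10000 and
 * C * sum(used) = 100 * 100 = 10000, so IF = 1 - 10000/10000 = 0. With both regions
 * half full (case c), sum(used^2) = 2 * 50^2 = 5000 against the same denominator,
 * so IF = 1 - 5000/10000 = 1/2.
 */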
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is half full, free space split into two equal contiguous chunks => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 */
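/*
 * Worked example (hypothetical numbers): 10 equal regions, 5 of them empty. If the 5
 * empty regions form one contiguous run, EF = 1 - 5/5 = 0. If they alternate with the
 * full regions, the largest contiguous run is a single region and EF = 1 - 1/5 = 0.8.
 */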
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif
