/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

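  // With large pages enabled, all three mappings below use the large page size;
  // otherwise they fall back to the regular VM page size. Note that the pretouch
  // page sizes may still be reduced for transparent huge pages later in this method.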
  size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

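  // Bitmap memory is committed and uncommitted in page-sized "slices". If one page
  // covers the bitmap data of several regions, a slice spans all of those regions;
  // otherwise a slice is simply one region's worth of bitmap.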
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

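  // Commit only as much of the bitmap as covers the initially committed regions,
  // rounding the region count up to a whole number of slices.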
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
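  // Region objects are padded out to the cache line size, presumably so that
  // frequently updated fields of neighboring regions do not falsely share cache lines.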
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

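    // Walk the candidate addresses in powers of two, from the smallest aligned
    // address up to 1 GB, and take the first one the OS agrees to reserve.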
    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

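      // Construct the region object in-place, in its cache-line-aligned slot of
      // the pre-reserved region storage.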
      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But, the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != NULL, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _gc_mode(NULL),
  _heuristics(NULL),
  _free_set(NULL),
  _pacer(NULL),
  _verifier(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ true);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelGCThreads,
                      /* are_GC_task_threads */ false,
                 /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // The GCLAB cannot be initialized early during VM startup, as it cannot determine
  // its max_size yet. From now on, we let the WorkGang initialize the GCLAB when a
  // new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

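// Wasted words (e.g. retired LAB tails) count towards the allocation rate seen by the
// pacer and heuristics, but not towards "used". With pacing enabled, the waste claims
// pacer budget unconditionally (the second argument appears to force the claim), so
// the pacer stays in sync with actual allocation activity.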
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

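// The heap is one contiguous reservation covering all regions, so a simple
// range check against the first and last region boundaries suffices.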
bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the application
  // can keep using the committed regions near the beginning. GC allocations are much less
  // frequent, and can therefore absorb the committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
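      // Re-check under the heap lock: the region may have been allocated into
      // while we were acquiring the lock.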
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated buffer.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If allocation failed, block until the control thread reacted, then retry the allocation.
    //
    // A thread requesting an allocation may unblock long after the GC happened, only to
    // fail its second allocation attempt because other threads have already depleted the
    // free storage. In this case, the better strategy is to keep retrying, as long as GC
    // makes progress.
    //
    // We also need to make sure the allocation is retried after at least one Full GC,
    // which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

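    // Even after GC stops reporting progress, keep retrying for a bounded number
    // of attempts, so that the request has survived at least one Full GC.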
    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

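  // Concurrent workers join the suspendible thread set, so they can be paused
  // when a safepoint is requested; workers running at a safepoint use a plain
  // parallel worker session instead. Both paths need the evacuation OOM scope.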
  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark");
  st->print_cr("SN=alloc sequence number");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, assertions fail when printing a region to the
    // trace log, as they expect every region to belong to a humongous sequence that
    // starts with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != NULL) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(tcl);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration())
    return;

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to current GC cycle
  // This populates the work stack with initial objects
  // It is important to relinquish the associated locks before diving
  // into heap dumper
  ShenandoahHeapIterationRootScanner rp;
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for iterating objects in parallel
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
1366 
// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as preparation of the
// parallel marking queues.
// Every worker processes its own marking queue. Work stealing is used
// to balance the workload.
class ShenandoahParallelObjectIterator : public ParallelObjectIterator {
private:
  uint                         _num_workers;
  bool                         _init_ready;
  MarkBitMap*                  _aux_bit_map;
  ShenandoahHeap*              _heap;
  ShenandoahScanObjectStack    _roots_stack; // global roots stack
  ShenandoahObjToScanQueueSet* _task_queues;
public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
        _num_workers(num_workers),
        _init_ready(false),
        _aux_bit_map(bitmap),
        _heap(ShenandoahHeap::heap()),
        _task_queues(NULL) { // ensure the destructor sees a sane pointer if we bail out early
    // Initialize bitmap
    _init_ready = _heap->prepare_aux_bitmap_for_iteration();
    if (!_init_ready) {
      return;
    }

    ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
    _heap->scan_roots_for_iteration(&_roots_stack, &oops);

    _init_ready = prepare_worker_queues();
  }

  ~ShenandoahParallelObjectIterator() {
    // Reclaim bitmap
    _heap->reclaim_aux_bitmap_for_iteration();
    // Reclaim queues for workers
    if (_task_queues != NULL) {
      for (uint i = 0; i < _num_workers; ++i) {
        ShenandoahObjToScanQueue* q = _task_queues->queue(i);
        if (q != NULL) {
          delete q;
          _task_queues->register_queue(i, NULL);
        }
      }
      delete _task_queues;
      _task_queues = NULL;
    }
  }

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    if (_init_ready) {
      object_iterate_parallel(cl, worker_id, _task_queues);
    }
  }

private:
  // Divide the global roots stack into worker queues
  bool prepare_worker_queues() {
    _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
    for (uint i = 0; i < _num_workers; ++i) {
      ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
      task_queue->initialize();
      _task_queues->register_queue(i, task_queue);
    }
    // Divide roots among the workers. Assume that the object reference distribution
    // correlates with the root kind, and use round-robin so that every worker has the
    // same chance to process every kind of root.
    size_t roots_num = _roots_stack.size();
    if (roots_num == 0) {
      // No work to do
      return false;
    }

    for (uint j = 0; j < roots_num; j++) {
      uint stack_id = j % _num_workers;
      oop obj = _roots_stack.pop();
      _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
    }
    return true;
  }

  void object_iterate_parallel(ObjectClosure* cl,
                               uint worker_id,
                               ShenandoahObjToScanQueueSet* queue_set) {
    assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
    assert(queue_set != NULL, "task queue must not be NULL");

    ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
    assert(q != NULL, "object iterate queue must not be NULL");

    ShenandoahMarkTask t;
    ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);

    // Work through the queue to traverse heap.
    // Steal when there is no task in queue.
    while (q->pop(t) || queue_set->steal(worker_id, t)) {
      oop obj = t.obj();
      assert(oopDesc::is_oop(obj), "must be a valid oop");
      cl->do_object(obj);
      obj->oop_iterate(&oops);
    }
    assert(q->is_empty(), "should be empty");
  }
};

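// Entry point for parallel heap walks (e.g. parallel heap inspection). A sketch
// of the expected protocol, not a prescription: construct the iterator, have
// each of the 'workers' threads call object_iterate(cl, worker_id), then
// destroy the iterator to release the queues and the auxiliary bitmap.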
ParallelObjectIterator* ShenandoahHeap::parallel_object_iterator(uint workers) {
  return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
}

// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
  if (is_concurrent_mark_in_progress() && (obj != NULL)) {
    ShenandoahBarrierSet::barrier_set()->enqueue(obj);
  }
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

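// Gang task that applies a region closure to all heap regions in parallel.
// Workers claim strides of ShenandoahParallelRegionStride regions by atomically
// bumping a shared index, so no up-front partitioning of regions is needed.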
class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
          AbstractGangTask("Shenandoah Parallel Region Operation"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    size_t stride = ShenandoahParallelRegionStride;

    size_t max = _heap->num_regions();
    while (Atomic::load(&_index) < max) {
      size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  if (num_regions() > ShenandoahParallelRegionStride) {
    ShenandoahParallelHeapRegionTask task(blk);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have updated it already during
      // concurrent reset, so it is very likely we don't need to do another write here.
      if (_ctx->top_at_mark_start(r) != r->top()) {
        _ctx->capture_top_at_mark_start(r);
      }
    } else {
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

class ShenandoahRendezvousClosure : public HandshakeClosure {
public:
  inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
  inline void do_thread(Thread* thread) {}
};

void ShenandoahHeap::rendezvous_threads() {
  ShenandoahRendezvousClosure cl;
  Handshake::execute(&cl);
}

void ShenandoahHeap::recycle_trash() {
  free_set()->recycle_trash();
}

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happen in the meantime.
      r->clear_live_data();
      _ctx->capture_top_at_mark_start(r);
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::prepare_gc() {
  reset_mark_bitmap();

  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalMarkUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // All allocations past TAMS are implicitly live, adjust the region data.
      // Bitmaps/TAMS are swapped at this point, so we need to poll the complete bitmap.
      HeapWord* tams = _ctx->top_at_mark_start(r);
      HeapWord* top = r->top();
      if (top > tams) {
        r->increase_live_data_alloc_words(pointer_delta(top, tams));
      }

      // We are about to select the collection set, make sure it knows about
      // current pinning status. Also, this allows trashing more regions that
      // now have their pinning status dropped.
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }

      // Remember the limit for updating refs. It is guaranteed that no
      // from-space references are written from here on.
      r->set_update_watermark_at_safepoint(r->top());
    } else {
      assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
      assert(_ctx->top_at_mark_start(r) == r->top(),
             "Region " SIZE_FORMAT " should have correct TAMS", r->index());
    }
  }

  bool is_thread_safe() { return true; }
};

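// Final-mark (or degenerated-GC) bookkeeping at the safepoint: refresh the
// per-region state, choose the collection set under the heap lock, and
// rebuild the free set so the allocator sees the regions that remain.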
void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(lock());
    _collection_set->clear();
    heuristics()->choose_collection_set(_collection_set);
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::do_class_unloading() {
  _unloader.unload();
}

void ShenandoahHeap::stw_weak_refs(bool full_gc) {
  // Weak refs processing
  ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
                                                : ShenandoahPhaseTimings::degen_gc_weakrefs;
  ShenandoahTimingsTracker t(phase);
  ShenandoahGCWorkerPhase worker_phase(phase);
  ref_processor()->process_references(phase, workers(), false /* concurrent */);
}

void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
  // make them parsable for update code to work correctly. Plus, we can compute new sizes
  // for future GCLABs here.
  if (UseTLAB) {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
    gclabs_retire(ResizeTLAB);
  }

  _update_refs_iterator.reset();
}

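// Publish the shared gc-state byte into every Java thread's thread-local
// copy, which is what the barrier fast paths actually read.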
void ShenandoahHeap::set_gc_state_all_threads(char state) {
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ShenandoahThreadLocalData::set_gc_state(t, state);
  }
}

void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  set_gc_state_all_threads(_gc_state.raw_value());
}

void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  assert(!has_forwarded_objects(), "Not expected before/after mark phase");
  set_gc_state_mask(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}

void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}

void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
  if (in_progress) {
    _concurrent_strong_root_in_progress.set();
  } else {
    _concurrent_strong_root_in_progress.unset();
  }
}

void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
  set_gc_state_mask(WEAK_ROOTS, cond);
}

GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}

size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}

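// Attempt to move the cancellation flag from CANCELLABLE to CANCELLED and
// return true for the single caller that wins the race. With suspendible
// workers the flag can transiently read NOT_CANCELLED; in that case we spin,
// yielding to a pending safepoint, until the state settles.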
bool ShenandoahHeap::try_cancel_gc() {
  while (true) {
    jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
    if (prev == CANCELLABLE) return true;
    else if (prev == CANCELLED) return false;
    assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
    assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
    Thread* thread = Thread::current();
    if (thread->is_Java_thread()) {
      // We need to provide a safepoint here, otherwise we might
      // spin forever if a safepoint is pending.
      ThreadBlockInVM sp(thread->as_Java_thread());
      SpinPause();
    }
  }
}

void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
  if (try_cancel_gc()) {
    FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
    log_info(gc)("%s", msg.buffer());
    Events::log(Thread::current(), "%s", msg.buffer());
  }
}

uint ShenandoahHeap::max_workers() {
  return _max_workers;
}

void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate when GC is running.

  // Step 0. Notify policy to disable event recording.
  _shenandoah_policy->record_shutdown();

  // Step 1. Notify control thread that we are in shutdown.
  // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
  // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
  control_thread()->prepare_for_graceful_shutdown();

  // Step 2. Notify GC workers that we are cancelling GC.
  cancel_gc(GCCause::_shenandoah_stop_vm);

  // Step 3. Wait until GC worker exits normally.
  control_thread()->stop();
}

void ShenandoahHeap::stw_unload_classes(bool full_gc) {
  if (!unload_classes()) return;
  // Unload classes and purge SystemDictionary.
  {
    ShenandoahPhaseTimings::Phase phase = full_gc ?
                                          ShenandoahPhaseTimings::full_gc_purge_class_unload :
                                          ShenandoahPhaseTimings::degen_gc_purge_class_unload;
    ShenandoahGCPhase gc_phase(phase);
    ShenandoahGCWorkerPhase worker_phase(phase);
    bool purged_class = SystemDictionary::do_unloading(gc_timer());

    ShenandoahIsAliveSelector is_alive;
    uint num_workers = _workers->active_workers();
    ShenandoahClassUnloadingTask unlink_task(phase, is_alive.is_alive_closure(), num_workers, purged_class);
    _workers->run_task(&unlink_task);
  }

  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_purge_cldg :
                            ShenandoahPhaseTimings::degen_gc_purge_cldg);
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
  }
  // Resize and verify metaspace
  MetaspaceGC::compute_new_size();
  DEBUG_ONLY(MetaspaceUtils::verify();)
}

// Weak roots are either pre-evacuated (final mark) or updated (final update-refs),
// so they should not have forwarded oops.
// However, we do need to "null out" dead oops in the roots, if that cannot be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
  uint num_workers = _workers->active_workers();
  ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
                                               ShenandoahPhaseTimings::full_gc_purge_weak_par :
                                               ShenandoahPhaseTimings::degen_gc_purge_weak_par;
  ShenandoahGCPhase phase(timing_phase);
  ShenandoahGCWorkerPhase worker_phase(timing_phase);
  // Cleanup weak roots
  if (has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahUpdateRefsClosure keep_alive;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
      cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
    _workers->run_task(&cleaning_task);
  } else {
    ShenandoahIsAliveClosure is_alive;
#ifdef ASSERT
    ShenandoahAssertNotForwardedClosure verify_cl;
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
      cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
#else
    ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
      cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
#endif
    _workers->run_task(&cleaning_task);
  }
}

void ShenandoahHeap::parallel_cleaning(bool full_gc) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
  ShenandoahGCPhase phase(full_gc ?
                          ShenandoahPhaseTimings::full_gc_purge :
                          ShenandoahPhaseTimings::degen_gc_purge);
  stw_weak_refs(full_gc);
  stw_process_weak_roots(full_gc);
  stw_unload_classes(full_gc);
}

void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
  set_gc_state_mask(HAS_FORWARDED, cond);
}

void ShenandoahHeap::set_unload_classes(bool uc) {
  _unload_classes.set_cond(uc);
}

bool ShenandoahHeap::unload_classes() const {
  return _unload_classes.is_set();
}

address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

address ShenandoahHeap::cancelled_gc_addr() {
  return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
}

address ShenandoahHeap::gc_state_addr() {
  return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
}

size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}

void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}

void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::register_nmethod(nm);
}

void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::unregister_nmethod(nm);
}

void ShenandoahHeap::flush_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::flush_nmethod(nm);
}

oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
  heap_region_containing(o)->record_pin();
  return o;
}

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
  ShenandoahHeapRegion* r = heap_region_containing(o);
  assert(r != NULL, "Sanity");
  assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
  r->record_unpin();
}

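// Reconcile each region's pinned state with its current pin count, under the
// heap lock: regions whose count dropped to zero become unpinned, and regions
// that acquired pins become pinned.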
void ShenandoahHeap::sync_pinned_region_status() {
  ShenandoahHeapLocker locker(lock());

  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion *r = get_region(i);
    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          r->make_pinned();
        }
      }
    }
  }

  assert_pinned_region_status();
}

#ifdef ASSERT
void ShenandoahHeap::assert_pinned_region_status() {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
           "Region " SIZE_FORMAT " pinning status is inconsistent", i);
  }
}
#endif

ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
  return _gc_timer;
}

void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
  set_concurrent_weak_root_in_progress(true);
  if (unload_classes()) {
    _unloader.prepare();
  }
}

void ShenandoahHeap::finish_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
  if (unload_classes()) {
    _unloader.finish();
  }
}

#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
  assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");

  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (UseDynamicNumberOfGCThreads) {
      assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
    } else {
      // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
    }
  } else {
    if (UseDynamicNumberOfGCThreads) {
      assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
    } else {
      // Use ConcGCThreads outside safepoints
      assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
    }
  }
}
#endif

ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != NULL, "sanity");
  return _verifier;
}

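// Gang task for the update-references phase. Templated on CONCURRENT: the
// concurrent variant joins the suspendible thread set and can yield to
// safepoints, while the STW variant runs straight through.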
template<bool CONCURRENT>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions;
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
    AbstractGangTask("Shenandoah Update References"),
    _heap(ShenandoahHeap::heap()),
    _regions(regions) {
  }

  void work(uint worker_id) {
    if (CONCURRENT) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      do_work<ShenandoahConcUpdateRefsClosure>();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      do_work<ShenandoahSTWUpdateRefsClosure>();
    }
  }

private:
  template<class T>
  void do_work() {
    T cl;
    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != NULL) {
      HeapWord* update_watermark = r->get_update_watermark();
      assert (update_watermark >= r->bottom(), "sanity");
      if (r->is_active() && !r->is_cset()) {
        _heap->marked_object_oop_iterate(r, &cl, update_watermark);
      }
      if (ShenandoahPacing) {
        _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
      }
      if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
        return;
      }
      r = _regions->next();
    }
  }
};

void ShenandoahHeap::update_heap_references(bool concurrent) {
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  if (concurrent) {
    ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
    workers()->run_task(&task);
  }
}

class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeapLock* const _lock;

public:
  ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.

    if (r->is_active()) {
      if (r->is_pinned()) {
        if (r->pin_count() == 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_unpinned();
        }
      } else {
        if (r->pin_count() > 0) {
          ShenandoahHeapLocker locker(_lock);
          r->make_pinned();
        }
      }
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahHeap::update_heap_region_states(bool concurrent) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
    ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
    parallel_heap_region_iterate(&cl);

    assert_pinned_region_status();
  }

  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
    trash_cset_regions();
  }
}

void ShenandoahHeap::rebuild_free_set(bool concurrent) {
  {
    ShenandoahGCPhase phase(concurrent ?
                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
    ShenandoahHeapLocker locker(lock());
    _free_set->rebuild();
  }
}

void ShenandoahHeap::print_extended_on(outputStream* st) const {
  print_on(st);
  print_heap_regions_on(st);
}

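// Bitmap memory is committed and uncommitted one slice at a time, where a
// slice covers _bitmap_regions_per_slice regions. Returns true if any region
// in r's slice (optionally excluding r itself) is committed, meaning the
// slice has to stay committed.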
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
  size_t slice = r->index() / _bitmap_regions_per_slice;

  size_t regions_from = _bitmap_regions_per_slice * slice;
  size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
  for (size_t g = regions_from; g < regions_to; g++) {
    assert (g / _bitmap_regions_per_slice == slice, "same slice");
    if (skip_self && g == r->index()) continue;
    if (get_region(g)->is_committed()) {
      return true;
    }
  }
  return false;
}

bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need commits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; exit right away.
    return true;
  }

  // Commit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  char* start = (char*) _bitmap_region.start() + off;

  if (!os::commit_memory(start, len, false)) {
    return false;
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
  }

  return true;
}

bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion* r) {
  shenandoah_assert_heaplocked();

  // Bitmaps in special regions do not need uncommits
  if (_bitmap_region_special) {
    return true;
  }

  if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
    return true;
  }

  // Uncommit the bitmap slice:
  size_t slice = r->index() / _bitmap_regions_per_slice;
  size_t off = _bitmap_bytes_per_slice * slice;
  size_t len = _bitmap_bytes_per_slice;
  if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
    return false;
  }
  return true;
}

void ShenandoahHeap::safepoint_synchronize_begin() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ShenandoahHeap::safepoint_synchronize_end() {
  if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
  static const char* msg = "Concurrent uncommit";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
  EventMark em("%s", msg);

  op_uncommit(shrink_before, shrink_until);
}

void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

MemoryUsage ShenandoahHeap::memory_usage() {
  return _memory_pool->get_memory_usage();
}

ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

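// Per-worker liveness cache: one counter per region, accumulated during
// marking and flushed into the regions' live data when the worker is done.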
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(_liveness_cache != NULL, "sanity");
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  assert(_liveness_cache != NULL, "sanity");
  ShenandoahLiveData* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahLiveData live = ld[i];
    if (live > 0) {
      ShenandoahHeapRegion* r = get_region(i);
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}