/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {

  reset_gc_id();
  create_and_start();
  _periodic_task.enroll();
  if (ShenandoahPacing) {
    _periodic_pacer_notify_task.enroll();
  }
}

ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}

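// Periodic task that pushes monitoring counter updates from the control
// thread: both the forced updates around GC cycles, and the amortized
// updates flagged from the allocation path.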
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

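// Periodic task that wakes up allocating threads blocked on the pacer,
// so they can re-check the pacing budget.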
void ShenandoahPeriodicPacerNotify::task() {
  assert(ShenandoahPacing, "Should not be here otherwise");
  ShenandoahHeap::heap()->pacer()->notify_waiters();
}

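// Main loop of the control thread: poll for pending GC triggers (allocation
// failure, explicit/implicit requests, heuristics), run the selected GC mode,
// then handle uncommit and sleep with exponential back-off when idle.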
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // The shrink period avoids constantly polling regions for shrinking.
  // Using a period 10x shorter than the delay means we detect regions
  // eligible for shrinking with a lag of at most 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen, memory_order_relaxed);

    // Check if we have seen a new target for soft max heap size.
    bool soft_max_changed = check_soft_max_changed();
    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;

        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow all soft references on this cycle, if handling allocation failure,
    // either implicit or explicit GC request, or we are requested to do so unconditionally.
    if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Capture metaspace usage before GC.
      const size_t metadata_prev_used = MetaspaceUtils::used_bytes();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we should dump the free set data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      switch (mode) {
        case none:
          break;
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we should report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();
      if (ShenandoahPacing) {
        heap->pacer()->flush_stats_to_cycle();
      }

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
          if (ShenandoahPacing) {
            heap->pacer()->print_cycle_on(&ls);
          }
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(metadata_prev_used);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this many allocations
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) {
      // Explicit GC tries to uncommit everything down to min capacity.
      // Soft max change tries to uncommit everything down to target capacity.
      // Periodic uncommit tries to uncommit suitable regions down to min capacity.

      double shrink_before = (explicit_gc_requested || soft_max_changed) ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);

      size_t shrink_until = soft_max_changed ?
                             heap->soft_max_capacity() :
                             heap->min_capacity();

      service_uncommit(shrink_before, shrink_until);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

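// Pick up a new ShenandoahSoftMaxHeapSize target, if the user changed it at
// runtime. The new value is clamped between min and max capacity before it
// is applied. Returns true if the soft max actually changed.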
bool ShenandoahControlThread::check_soft_max_changed() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t new_soft_max = Atomic::load(&ShenandoahSoftMaxHeapSize);
  size_t old_soft_max = heap->soft_max_capacity();
  if (new_soft_max != old_soft_max) {
    new_soft_max = MAX2(heap->min_capacity(), new_soft_max);
    new_soft_max = MIN2(heap->max_capacity(), new_soft_max);
    if (new_soft_max != old_soft_max) {
      log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max),
                   byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max)
      );
      heap->set_soft_max_capacity(new_soft_max);
      return true;
    }
  }
  return false;
}

void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // The normal cycle goes through all concurrent phases. If an allocation failure (af)
  // happens during any of the concurrent phases, the cycle first degrades to Degenerated GC
  // and completes there. If a second allocation failure happens during the Degenerated GC
  // cycle (for example, when GC tries to evacuate something and no memory is available),
  // the cycle degrades to Full GC.
  //
  // There is also a shortcut through the normal cycle: the immediate garbage shortcut, when
  // heuristics say there are no regions to compact, and all the collection comes from
  // immediately reclaimable regions.
  //
  // ................................................................................................
  //
  //                                    (immediate garbage shortcut)                Concurrent GC
  //                             /-------------------------------------------\
  //                             |                                           |
  //                             |                                           |
  //                             |                                           |
  //                             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                   Full GC  ----------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update-refs has freed up the collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}

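// Check if GC was cancelled. If it was, and we are not shutting down gracefully,
// remember the point at which the concurrent cycle was interrupted, so the next
// control loop iteration can continue from there with Degenerated GC.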
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_gc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}

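// Run a stop-the-world Full GC cycle for the given cause, and record its
// success with heuristics and policy.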
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

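// Run a Degenerated GC cycle: pick up the concurrent cycle from the point
// where it was cancelled, and finish it under a stop-the-world pause.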
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}

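// Uncommit empty regions: shrink_before is the point in time before which a
// region must have become empty to qualify, shrink_until is the committed
// capacity we are allowed to shrink down to.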
void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Determine if there is work to do. This avoids taking heap lock if there is
  // no work available, avoids spamming logs with superfluous logging messages,
  // and minimises the amount of work while locks are taken.

  if (heap->committed() <= shrink_until) return;

  bool has_work = false;
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      has_work = true;
      break;
    }
  }

  if (has_work) {
    heap->entry_uncommit(shrink_before, shrink_until);
  }
}

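// Explicit GCs are the ones coming from user code (e.g. System.gc()) or from
// serviceability (JVMTI, heap inspection/dump machinery).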
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

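// Entry point for GC requests from outside the control thread. Explicit GC
// requests honor -XX:+DisableExplicitGC; the other accepted causes (WhiteBox
// full GC, FullGCALot/ScavengeALot testing hooks, metadata GC) are always
// handled.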
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  if (is_explicit_gc(cause)) {
    if (!DisableExplicitGC) {
      handle_requested_gc(cause);
    }
  } else {
    handle_requested_gc(cause);
  }
}

void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when an explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // cleanup opportunities that were made available before the caller
  // requested the GC.

  MonitorLockerEx ml(&_gc_waiters_lock);
  size_t current_gc_id = get_gc_id();
  size_t required_gc_id = current_gc_id + 1;
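  // Re-arm the request on every iteration: the control thread unsets the flag
  // and notifies waiters whenever some requested cycle completes, which may be
  // a cycle that had started before this request was seen.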
  while (current_gc_id < required_gc_id) {
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
    current_gc_id = get_gc_id();
  }
}

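// Called by a Java thread that failed to allocate: schedule the allocation
// failure GC, cancel any concurrent cycle in progress, and block until the
// alloc failure GC has done its work.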
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}

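// Called when allocation fails during evacuation. Unlike handle_alloc_failure(),
// this does not block the caller; it only cancels the GC and lets the caller
// unwind.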
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLockerEx ml(&_gc_waiters_lock);
  ml.notify_all();
}

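// Amortized counters update: only touch the counters when the allocation
// path has flagged that something changed since the last update.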
void ShenandoahControlThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahControlThread::notify_heap_changed() {
  // This is called from the allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

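// Record allocations for the pacer. Accumulated in _allocs_seen and consumed
// once per control loop iteration in run_service().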
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen, memory_order_relaxed);
}

void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

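// The GC id counter underpins handle_requested_gc(): it is reset once at
// startup, bumped when a cycle starts, and read with acquire semantics by
// requesting threads waiting for a complete cycle.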
void ShenandoahControlThread::reset_gc_id() {
  OrderAccess::release_store_fence(&_gc_id, (size_t)0);
}

void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}

size_t ShenandoahControlThread::get_gc_id() {
  return OrderAccess::load_acquire(&_gc_id);
}

void ShenandoahControlThread::print() const {
  print_on(tty);
}

void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahControlThread::start() {
  create_and_start();
}

void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}