/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "memory/universe.hpp"
#include "runtime/interfaceSupport.inline.hpp"

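// Perform a full (stop-the-world) collection.  The collection is reported as
// explicitly requested, and soft references are not unconditionally cleared;
// see the argument annotations on the do_full_collection() call below.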
void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
}

VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
                                                   GCCause::Cause gc_cause,
                                                   double target_pause_time_ms) :
  VM_GC_Operation(gc_count_before, gc_cause),
  _target_pause_time_ms(target_pause_time_ms),
  _transient_failure(false),
  _cycle_already_in_progress(false),
  _whitebox_attached(false),
  _terminating(false),
  _gc_succeeded(false)
{}

bool VM_G1TryInitiateConcMark::doit_prologue() {
  bool result = VM_GC_Operation::doit_prologue();
  // The prologue can fail for a couple of reasons. The first is that another GC
  // got scheduled and prevented the scheduling of the initial mark GC. The
  // second is that the GC locker may be active and the heap can't be expanded.
  // In both cases we want to retry the GC so that the initial mark pause is
  // actually scheduled. In the second case, however, we should stall until
  // the GC locker is no longer active and then retry the initial mark GC.
  if (!result) _transient_failure = true;
  return result;
}

void VM_G1TryInitiateConcMark::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCCauseSetter x(g1h, _gc_cause);

  // Record for handling by caller.
  _terminating = g1h->_cm_thread->should_terminate();

  if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) {
    // When terminating, the request to initiate a concurrent cycle will be
    // ignored by do_collection_pause_at_safepoint; instead it will just do
    // a young-only or mixed GC (depending on phase).  For a user request
    // there's no point in even doing that much, so done.  For some non-user
    // requests the alternative GC might still be needed.
  } else if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) {
    // Failure to force the next GC pause to be an initial mark indicates
    // there is already a concurrent marking cycle in progress.  Set flag
    // to notify the caller and return immediately.
    _cycle_already_in_progress = true;
  } else if ((_gc_cause != GCCause::_wb_breakpoint) &&
             ConcurrentGCBreakpoints::is_controlled()) {
    // WhiteBox wants to be in control of concurrent cycles, so don't try to
    // start one.  This check is after the force_initial_mark_xxx so that a
    // request will be remembered for a later partial collection, even though
    // we've rejected this request.
    _whitebox_attached = true;
  } else if (g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) {
    _gc_succeeded = true;
  } else {
    // Failure to perform the collection at all occurs because GCLocker is
    // active, and we have the bad luck to be the collection request that
    // makes a later _gc_locker collection needed.  (Else we would have hit
    // the GCLocker check in the prologue.)
    _transient_failure = true;
  }
}

VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t         word_size,
                                                     uint           gc_count_before,
                                                     GCCause::Cause gc_cause,
                                                     double         target_pause_time_ms) :
  VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
  _gc_succeeded(false),
  _target_pause_time_ms(target_pause_time_ms) {

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive",
            target_pause_time_ms);
  _gc_cause = gc_cause;
}

void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                   false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _gc_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  // Try a partial collection of some kind.
  _gc_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);

  if (_gc_succeeded && (_word_size > 0)) {
    // An allocation had been requested. Do it, retrying with a stronger
    // kind of GC if necessary.
    _result = g1h->satisfy_failed_allocation(_word_size, &_gc_succeeded);
  }
}

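// Run the wrapped closure inside a pause on behalf of a concurrent operation
// (e.g. the Remark and Cleanup pauses of concurrent marking), with the usual
// logging, timing and statistics scaffolding set up around it.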
void VM_G1Concurrent::doit() {
  GCIdMark gc_id_mark(_gc_id);
  GCTraceCPUTime tcpu;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // GCTraceTime(...) only supports sub-phases, so a more verbose version
  // is needed when we report the top-level pause phase.
  GCTraceTimeLogger(Info, gc) logger(_message, GCCause::_no_gc, true);
  GCTraceTimePauseTimer       timer(_message, g1h->concurrent_mark()->gc_timer_cm());
  GCTraceTimeDriver           t(&logger, &timer);

  TraceCollectorStats tcs(g1h->g1mm()->conc_collection_counters());
  SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
  IsGCActiveMark x;
  _cl->do_void();
}

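// Heap_lock is acquired here and released in doit_epilogue(), so it is held
// across the pause, matching the locking protocol of the other GC VM
// operations.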
bool VM_G1Concurrent::doit_prologue() {
  Heap_lock->lock();
  return true;
}

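// Before releasing Heap_lock, notify any waiter on the reference pending
// list (typically the reference handler thread), mirroring what
// VM_GC_Operation::doit_epilogue() does.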
void VM_G1Concurrent::doit_epilogue() {
  if (Universe::has_reference_pending_list()) {
    Heap_lock->notify_all();
  }
  Heap_lock->unlock();
}