1 /*
2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
29 #include "gc/shenandoah/shenandoahPacer.hpp"
30 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/mutexLocker.hpp"
33
34 /*
35 * In normal concurrent cycle, we have to pace the application to let GC finish.
36 *
37 * Here, we do not know how large would be the collection set, and what are the
38 * relative performances of the each stage in the concurrent cycle, and so we have to
39 * make some assumptions.
40 *
41 * For concurrent mark, there is no clear notion of progress. The moderately accurate
42 * and easy to get metric is the amount of live objects the mark had encountered. But,
43  * that does not directly correlate with the used heap, because the heap might be fully
44 * dead or fully alive. We cannot assume either of the extremes: we would either allow
45 * application to run out of memory if we assume heap is fully dead but it is not, and,
46 * conversely, we would pacify application excessively if we assume heap is fully alive
47 * but it is not. So we need to guesstimate the particular expected value for heap liveness.
48 * The best way to do this is apparently recording the past history.
49 *
50 * For concurrent evac and update-refs, we are walking the heap per-region, and so the
51 * notion of progress is clear: we get reported the "used" size from the processed regions
52 * and use the global heap-used as the baseline.
53 *
54 * The allocatable space when GC is running is "free" at the start of phase, but the
55 * accounted budget is based on "used". So, we need to adjust the tax knowing that.
56 */
57
setup_for_mark()58 void ShenandoahPacer::setup_for_mark() {
59 assert(ShenandoahPacing, "Only be here when pacing is enabled");
60
61 size_t live = update_and_get_progress_history();
62 size_t free = _heap->free_set()->available();
63
64 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
65 size_t taxable = free - non_taxable;
66
67 double tax = 1.0 * live / taxable; // base tax for available free space
68 tax *= 1; // mark can succeed with immediate garbage, claim all available space
69 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
70
71 restart_with(non_taxable, tax);
72
73 log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
74 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
75 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live),
76 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
77 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
78 tax);
79 }
80
setup_for_evac()81 void ShenandoahPacer::setup_for_evac() {
82 assert(ShenandoahPacing, "Only be here when pacing is enabled");
83
84 size_t used = _heap->collection_set()->used();
85 size_t free = _heap->free_set()->available();
86
87 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
88 size_t taxable = free - non_taxable;
89
90 double tax = 1.0 * used / taxable; // base tax for available free space
91 tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free
92 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
93 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
94
95 restart_with(non_taxable, tax);
96
97 log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
98 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
99 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
100 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
101 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
102 tax);
103 }
104
setup_for_updaterefs()105 void ShenandoahPacer::setup_for_updaterefs() {
106 assert(ShenandoahPacing, "Only be here when pacing is enabled");
107
108 size_t used = _heap->used();
109 size_t free = _heap->free_set()->available();
110
111 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100;
112 size_t taxable = free - non_taxable;
113
114 double tax = 1.0 * used / taxable; // base tax for available free space
115 tax *= 1; // update-refs is the last phase, claim the remaining free
116 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase
117 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap
118
119 restart_with(non_taxable, tax);
120
121 log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, "
122 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
123 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
124 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
125 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable),
126 tax);
127 }
128
129 /*
130 * In idle phase, we have to pace the application to let control thread react with GC start.
131 *
132 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges
133 * it had seen recent allocations. It will naturally pace the allocations if control thread is
134 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget
135 * for applications to allocate at.
136 */
137
setup_for_idle()138 void ShenandoahPacer::setup_for_idle() {
139 assert(ShenandoahPacing, "Only be here when pacing is enabled");
140
141 size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack;
142 double tax = 1;
143
144 restart_with(initial, tax);
145
146 log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
147 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial),
148 tax);
149 }
150
151 /*
152 * There is no useful notion of progress for these operations. To avoid stalling
153 * the allocators unnecessarily, allow them to run unimpeded.
154 */
155
setup_for_preclean()156 void ShenandoahPacer::setup_for_preclean() {
157 assert(ShenandoahPacing, "Only be here when pacing is enabled");
158
159 size_t initial = _heap->max_capacity();
160 restart_with(initial, 1.0);
161
162 log_info(gc, ergo)("Pacer for Precleaning. Non-Taxable: " SIZE_FORMAT "%s",
163 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
164 }
165
setup_for_reset()166 void ShenandoahPacer::setup_for_reset() {
167 assert(ShenandoahPacing, "Only be here when pacing is enabled");
168
169 size_t initial = _heap->max_capacity();
170 restart_with(initial, 1.0);
171
172 log_info(gc, ergo)("Pacer for Reset. Non-Taxable: " SIZE_FORMAT "%s",
173 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
174 }
175
update_and_get_progress_history()176 size_t ShenandoahPacer::update_and_get_progress_history() {
177 if (_progress == -1) {
178 // First initialization, report some prior
179 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
180 return (size_t) (_heap->max_capacity() * 0.1);
181 } else {
182 // Record history, and reply historical data
183 _progress_history->add(_progress);
184 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
185 return (size_t) (_progress_history->avg() * HeapWordSize);
186 }
187 }
188
// Reset the pacer for a new phase: install the initial budget (in heap words,
// derived from the non-taxable byte budget scaled by the tax rate), publish
// the new tax rate, and bump the epoch so outstanding pace tickets from the
// previous phase become stale (see unpace_for_alloc).
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
  // Budget is maintained in heap words, hence the shift by LogHeapWordSize.
  size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  // xchg overwrites (rather than adds to) whatever budget was left over.
  Atomic::xchg(&_budget, (intptr_t)initial);
  Atomic::store(&_tax_rate, tax_rate);
  Atomic::inc(&_epoch);

  // Shake up stalled waiters after budget update.
  _need_notify_waiters.try_set();
}
199
// Try to claim pacing budget for an allocation of "words" heap words.
// Returns true iff the claim succeeded. With force == true the claim always
// succeeds and may drive the budget negative; subsequent GC progress is then
// expected to replenish it (see pace_for_alloc).
bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Tax is at least one word, so even tiny requests pay something.
  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));

  intptr_t cur = 0;
  intptr_t new_val = 0;
  do {
    cur = Atomic::load(&_budget);
    if (cur < tax && !force) {
      // Progress depleted, alas.
      return false;
    }
    new_val = cur - tax;
  } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur); // retry on concurrent budget updates
  return true;
}
217
unpace_for_alloc(intptr_t epoch,size_t words)218 void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
219 assert(ShenandoahPacing, "Only be here when pacing is enabled");
220
221 if (_epoch != epoch) {
222 // Stale ticket, no need to unpace.
223 return;
224 }
225
226 size_t tax = MAX2<size_t>(1, words * Atomic::load(&_tax_rate));
227 add_budget(tax);
228 }
229
// Current pacer epoch: a ticket identifying the ongoing pacing phase,
// bumped on every restart_with(). Used to detect stale unpace requests.
intptr_t ShenandoahPacer::epoch() {
  return Atomic::load(&_epoch);
}
233
// Pace (throttle) the current thread for an allocation of "words" heap words.
//
// Fast path claims against the current budget and returns immediately.
// Otherwise the claim is forced (budget may go negative) and the thread
// waits — up to ShenandoahPacingMaxDelay ms total — for GC progress to
// replenish the budget before proceeding with the allocation.
void ShenandoahPacer::pace_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Fast path: try to allocate right away
  bool claimed = claim_for_alloc(words, false);
  if (claimed) {
    return;
  }

  // Forcefully claim the budget: it may go negative at this point, and
  // GC should replenish for this and subsequent allocations. After this claim,
  // we would wait a bit until our claim is matched by additional progress,
  // or the time budget depletes.
  claimed = claim_for_alloc(words, true);
  assert(claimed, "Should always succeed");

  // Threads that are attaching should not block at all: they are not
  // fully initialized yet. Blocking them would be awkward.
  // This is probably the path that allocates the thread oop itself.
  if (JavaThread::current()->is_attaching_via_jni()) {
    return;
  }

  double start = os::elapsedTime();

  size_t max_ms = ShenandoahPacingMaxDelay;
  size_t total_ms = 0;

  while (true) {
    // We could instead assist GC, but this would suffice for now.
    // Wait out the remaining time budget; at least 1 ms, since wait(0)
    // would block until notified.
    size_t cur_ms = (max_ms > total_ms) ? (max_ms - total_ms) : 1;
    wait(cur_ms);

    double end = os::elapsedTime();
    total_ms = (size_t)((end - start) * 1000);

    if (total_ms > max_ms || Atomic::load(&_budget) >= 0) {
      // Exiting if either:
      // a) Spent local time budget to wait for enough GC progress.
      //    Breaking out and allocating anyway, which may mean we outpace GC,
      //    and start Degenerated GC cycle.
      // b) The budget had been replenished, which means our claim is satisfied.
      ShenandoahThreadLocalData::add_paced_time(JavaThread::current(), end - start);
      break;
    }
  }
}
281
// Block the calling thread for up to time_ms milliseconds, or until
// notify_waiters() wakes it after a budget update.
void ShenandoahPacer::wait(size_t time_ms) {
  // Perform timed wait. It works like sleep(), except without modifying
  // the thread interruptible status. MonitorLocker also checks for safepoints.
  assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
  assert(time_ms <= LONG_MAX, "Sanity");
  MonitorLocker locker(_wait_monitor);
  _wait_monitor->wait((long)time_ms);
}
290
// Wake up all threads blocked in wait(), but only if a budget update has
// been published since the last notification. try_unset() makes at most one
// caller take the (monitor-protected) notify path per pending update.
void ShenandoahPacer::notify_waiters() {
  if (_need_notify_waiters.try_unset()) {
    MonitorLocker locker(_wait_monitor);
    _wait_monitor->notify_all();
  }
}
297
flush_stats_to_cycle()298 void ShenandoahPacer::flush_stats_to_cycle() {
299 double sum = 0;
300 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
301 sum += ShenandoahThreadLocalData::paced_time(t);
302 }
303 ShenandoahHeap::heap()->phase_timings()->record_phase_time(ShenandoahPhaseTimings::pacing, sum);
304 }
305
// Print a per-thread breakdown of pacing delays accrued since the last
// report, followed by total and average summaries, then reset the
// per-thread counters. Holds Threads_lock so the thread list stays stable
// while iterating.
void ShenandoahPacer::print_cycle_on(outputStream* out) {
  MutexLocker lock(Threads_lock);

  // The interval since the previous report is the 100% baseline below.
  double now = os::elapsedTime();
  double total = now - _last_time;
  _last_time = now;

  out->cr();
  out->print_cr("Allocation pacing accrued:");

  size_t threads_total = 0;
  size_t threads_nz = 0; // threads with non-zero paced time
  double sum = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    double d = ShenandoahThreadLocalData::paced_time(t);
    if (d > 0) {
      threads_nz++;
      sum += d;
      out->print_cr("  %5.0f of %5.0f ms (%5.1f%%): %s",
                    d * 1000, total * 1000, d/total*100, t->name());
    }
    threads_total++;
    // Counters are flushed per report, so each report covers one interval.
    ShenandoahThreadLocalData::reset_paced_time(t);
  }
  out->print_cr("  %5.0f of %5.0f ms (%5.1f%%): <total>",
                sum * 1000, total * 1000, sum/total*100);

  if (threads_total > 0) {
    out->print_cr("  %5.0f of %5.0f ms (%5.1f%%): <average total>",
                  sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100);
  }
  if (threads_nz > 0) {
    out->print_cr("  %5.0f of %5.0f ms (%5.1f%%): <average non-zero>",
                  sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100);
  }
  out->cr();
}
343