1 /*
2 * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/adaptiveSizePolicy.hpp"
27 #include "gc/shared/collectorPolicy.hpp"
28 #include "gc/shared/gcCause.hpp"
29 #include "gc/shared/gcUtil.inline.hpp"
30 #include "gc/shared/softRefPolicy.hpp"
31 #include "gc/shared/workgroup.hpp"
32 #include "logging/log.hpp"
33 #include "runtime/timer.hpp"
34 #include "utilities/ostream.hpp"
35
// Timers shared by all uses of the policy.  They measure the mutator
// interval between collections (minor/major respectively); static state
// is used since the timers span individual policy operations.
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
// Flip-flop used only under ForceDynamicNumberOfGCThreads to alternate
// the jiggled worker count in calc_default_active_workers().
bool AdaptiveSizePolicy::_debug_perturbation = false;

// The throughput goal is implemented as
//      _throughput_goal = 1 -  ( 1 / (1 + gc_cost_ratio))
// gc_cost_ratio is the ratio
//      application cost / gc cost
// For example a gc_cost_ratio of 4 translates into a
// throughput goal of .80
46
// Construct the policy from the initial generation sizes and the user goals.
//   init_eden_size     - starting eden size
//   init_promo_size    - starting promotion (old gen) size
//   init_survivor_size - starting survivor space size
//   gc_pause_goal_sec  - pause time goal in seconds
//   gc_cost_ratio      - desired application-cost/gc-cost ratio; e.g. 4
//                        yields a throughput goal of 0.80 (see above)
AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size,
                                       double gc_pause_goal_sec,
                                       uint gc_cost_ratio) :
    _eden_size(init_eden_size),
    _promo_size(init_promo_size),
    _survivor_size(init_survivor_size),
    _gc_pause_goal_sec(gc_pause_goal_sec),
    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
    _gc_overhead_limit_exceeded(false),
    _print_gc_overhead_limit_would_be_exceeded(false),
    _gc_overhead_limit_count(0),
    _latest_minor_mutator_interval_seconds(0),
    _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
    _young_gen_change_for_minor_throughput(0),
    _old_gen_change_for_major_throughput(0) {
  // A zero threshold would throw OOM on the first overhead-limit hit,
  // leaving no GC in which SoftReferences could first be cleared.
  assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
         "No opportunity to clear SoftReferences before GC overhead limit");
  // Padded averages add a multiple of the deviation so sizing decisions
  // err on the conservative side for pauses and survivor occupancy.
  _avg_minor_pause =
    new AdaptivePaddedAverage(AdaptiveTimeWeight, PausePadding);
  _avg_minor_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_minor_gc_cost = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
  _avg_major_gc_cost = new AdaptiveWeightedAverage(AdaptiveTimeWeight);

  _avg_young_live = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_old_live = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);
  _avg_eden_live = new AdaptiveWeightedAverage(AdaptiveSizePolicyWeight);

  _avg_survived = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
                                            SurvivorPadding);
  _avg_pretenured = new AdaptivePaddedNoZeroDevAverage(
                                            AdaptiveSizePolicyWeight,
                                            SurvivorPadding);

  // Linear least-squares fits used to predict pause time / collection
  // cost as a function of generation sizes (in megabytes).
  _minor_pause_old_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_pause_young_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _minor_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
  _major_collection_estimator =
    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);

  // Start the timers
  _minor_timer.start();

  _young_gen_policy_is_ready = false;
}
96
97 // If the number of GC threads was set on the command line,
98 // use it.
99 // Else
100 // Calculate the number of GC threads based on the number of Java threads.
101 // Calculate the number of GC threads based on the size of the heap.
102 // Use the larger.
103
calc_default_active_workers(uintx total_workers,const uintx min_workers,uintx active_workers,uintx application_workers)104 uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
105 const uintx min_workers,
106 uintx active_workers,
107 uintx application_workers) {
108 // If the user has specifically set the number of
109 // GC threads, use them.
110
111 // If the user has turned off using a dynamic number of GC threads
112 // or the users has requested a specific number, set the active
113 // number of workers to all the workers.
114
115 uintx new_active_workers = total_workers;
116 uintx prev_active_workers = active_workers;
117 uintx active_workers_by_JT = 0;
118 uintx active_workers_by_heap_size = 0;
119
120 // Always use at least min_workers but use up to
121 // GCThreadsPerJavaThreads * application threads.
122 active_workers_by_JT =
123 MAX2((uintx) GCWorkersPerJavaThread * application_workers,
124 min_workers);
125
126 // Choose a number of GC threads based on the current size
127 // of the heap. This may be complicated because the size of
128 // the heap depends on factors such as the throughput goal.
129 // Still a large heap should be collected by more GC threads.
130 active_workers_by_heap_size =
131 MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
132
133 uintx max_active_workers =
134 MAX2(active_workers_by_JT, active_workers_by_heap_size);
135
136 new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
137
138 // Increase GC workers instantly but decrease them more
139 // slowly.
140 if (new_active_workers < prev_active_workers) {
141 new_active_workers =
142 MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
143 }
144
145 // Check once more that the number of workers is within the limits.
146 assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
147 assert(new_active_workers >= min_workers, "Minimum workers not observed");
148 assert(new_active_workers <= total_workers, "Total workers not observed");
149
150 if (ForceDynamicNumberOfGCThreads) {
151 // Assume this is debugging and jiggle the number of GC threads.
152 if (new_active_workers == prev_active_workers) {
153 if (new_active_workers < total_workers) {
154 new_active_workers++;
155 } else if (new_active_workers > min_workers) {
156 new_active_workers--;
157 }
158 }
159 if (new_active_workers == total_workers) {
160 if (_debug_perturbation) {
161 new_active_workers = min_workers;
162 }
163 _debug_perturbation = !_debug_perturbation;
164 }
165 assert((new_active_workers <= ParallelGCThreads) &&
166 (new_active_workers >= min_workers),
167 "Jiggled active workers too much");
168 }
169
170 log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
171 "active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
172 "prev_active_workers: " UINTX_FORMAT "\n"
173 " active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
174 active_workers, new_active_workers, prev_active_workers,
175 active_workers_by_JT, active_workers_by_heap_size);
176 assert(new_active_workers > 0, "Always need at least 1");
177 return new_active_workers;
178 }
179
calc_active_workers(uintx total_workers,uintx active_workers,uintx application_workers)180 uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
181 uintx active_workers,
182 uintx application_workers) {
183 // If the user has specifically set the number of
184 // GC threads, use them.
185
186 // If the user has turned off using a dynamic number of GC threads
187 // or the users has requested a specific number, set the active
188 // number of workers to all the workers.
189
190 uint new_active_workers;
191 if (!UseDynamicNumberOfGCThreads ||
192 (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
193 new_active_workers = total_workers;
194 } else {
195 uintx min_workers = (total_workers == 1) ? 1 : 2;
196 new_active_workers = calc_default_active_workers(total_workers,
197 min_workers,
198 active_workers,
199 application_workers);
200 }
201 assert(new_active_workers > 0, "Always need at least 1");
202 return new_active_workers;
203 }
204
calc_active_conc_workers(uintx total_workers,uintx active_workers,uintx application_workers)205 uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
206 uintx active_workers,
207 uintx application_workers) {
208 if (!UseDynamicNumberOfGCThreads ||
209 (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
210 return ConcGCThreads;
211 } else {
212 uint no_of_gc_threads = calc_default_active_workers(total_workers,
213 1, /* Minimum number of workers */
214 active_workers,
215 application_workers);
216 return no_of_gc_threads;
217 }
218 }
219
tenuring_threshold_change() const220 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
221 return decrement_tenuring_threshold_for_gc_cost() ||
222 increment_tenuring_threshold_for_gc_cost() ||
223 decrement_tenuring_threshold_for_survivor_limit();
224 }
225
// Called at the start of a minor (young) collection.  The minor timer has
// been running since the end of the previous minor GC, so its current
// value is the mutator interval; record it, then restart the timer so it
// now measures the pause itself (read in minor_collection_end()).
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save most recent collection time
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  _minor_timer.reset();
  _minor_timer.start();
}
234
update_minor_pause_young_estimator(double minor_pause_in_ms)235 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
236 double minor_pause_in_ms) {
237 double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
238 _minor_pause_young_estimator->update(eden_size_in_mbytes,
239 minor_pause_in_ms);
240 }
241
// Called at the end of a minor (young) collection.  Folds the observed
// pause into the pause/cost averages and the size estimators, then
// restarts the minor timer so it measures the following mutator interval.
void AdaptiveSizePolicy::minor_collection_end(GCCause::Cause gc_cause) {
  // Update the pause time.
  _minor_timer.stop();

  // User-requested collections (System.gc()) are unrepresentative of
  // steady-state behavior and are skipped unless
  // UseAdaptiveSizePolicyWithSystemGC opts them in.
  if (!GCCause::is_user_requested_gc(gc_cause) ||
      UseAdaptiveSizePolicyWithSystemGC) {
    double minor_pause_in_seconds = _minor_timer.seconds();
    double minor_pause_in_ms = minor_pause_in_seconds * MILLIUNITS;

    // Sample for performance counter
    _avg_minor_pause->sample(minor_pause_in_seconds);

    // Cost of collection (unit-less): the pause as a fraction of the
    // total interval (mutator time + pause time).
    double collection_cost = 0.0;
    if ((_latest_minor_mutator_interval_seconds > 0.0) &&
        (minor_pause_in_seconds > 0.0)) {
      double interval_in_seconds =
        _latest_minor_mutator_interval_seconds + minor_pause_in_seconds;
      collection_cost =
        minor_pause_in_seconds / interval_in_seconds;
      _avg_minor_gc_cost->sample(collection_cost);
      // Sample for performance counter
      _avg_minor_interval->sample(interval_in_seconds);
    }

    // The policy does not have enough data until at least some
    // young collections have been done.
    _young_gen_policy_is_ready =
      (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);

    // Calculate variables used to estimate pause time vs. gen sizes
    double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
    update_minor_pause_young_estimator(minor_pause_in_ms);
    update_minor_pause_old_estimator(minor_pause_in_ms);

    log_trace(gc, ergo)("AdaptiveSizePolicy::minor_collection_end: minor gc cost: %f  average: %f",
                        collection_cost, _avg_minor_gc_cost->average());
    log_trace(gc, ergo)("  minor pause: %f minor period %f",
                        minor_pause_in_ms, _latest_minor_mutator_interval_seconds * MILLIUNITS);

    // Calculate variable used to estimate collection cost vs. gen sizes
    assert(collection_cost >= 0.0, "Expected to be non-negative");
    _minor_collection_estimator->update(eden_size_in_mbytes, collection_cost);
  }

  // Interval times use this timer to measure the mutator time.
  // Reset the timer after the GC pause.
  _minor_timer.reset();
  _minor_timer.start();
}
292
eden_increment(size_t cur_eden,uint percent_change)293 size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
294 size_t eden_heap_delta;
295 eden_heap_delta = cur_eden / 100 * percent_change;
296 return eden_heap_delta;
297 }
298
// Default eden growth step: YoungGenerationSizeIncrement percent of eden.
size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden) {
  return eden_increment(cur_eden, YoungGenerationSizeIncrement);
}
302
eden_decrement(size_t cur_eden)303 size_t AdaptiveSizePolicy::eden_decrement(size_t cur_eden) {
304 size_t eden_heap_delta = eden_increment(cur_eden) /
305 AdaptiveSizeDecrementScaleFactor;
306 return eden_heap_delta;
307 }
308
promo_increment(size_t cur_promo,uint percent_change)309 size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
310 size_t promo_heap_delta;
311 promo_heap_delta = cur_promo / 100 * percent_change;
312 return promo_heap_delta;
313 }
314
// Default old-gen growth step: TenuredGenerationSizeIncrement percent.
size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo) {
  return promo_increment(cur_promo, TenuredGenerationSizeIncrement);
}
318
promo_decrement(size_t cur_promo)319 size_t AdaptiveSizePolicy::promo_decrement(size_t cur_promo) {
320 size_t promo_heap_delta = promo_increment(cur_promo);
321 promo_heap_delta = promo_heap_delta / AdaptiveSizeDecrementScaleFactor;
322 return promo_heap_delta;
323 }
324
// Seconds elapsed since the last major collection.  The major timer runs
// between major GCs; it must be stopped to read it, then restarted.
// Note: mutates the static _major_timer even though the method is const,
// and each call yields a fresh (slightly later) reading.
double AdaptiveSizePolicy::time_since_major_gc() const {
  _major_timer.stop();
  double result = _major_timer.seconds();
  _major_timer.start();
  return result;
}
331
332 // Linear decay of major gc cost
decaying_major_gc_cost() const333 double AdaptiveSizePolicy::decaying_major_gc_cost() const {
334 double major_interval = major_gc_interval_average_for_decay();
335 double major_gc_cost_average = major_gc_cost();
336 double decayed_major_gc_cost = major_gc_cost_average;
337 if(time_since_major_gc() > 0.0) {
338 decayed_major_gc_cost = major_gc_cost() *
339 (((double) AdaptiveSizeMajorGCDecayTimeScale) * major_interval)
340 / time_since_major_gc();
341 }
342
343 // The decayed cost should always be smaller than the
344 // average cost but the vagaries of finite arithmetic could
345 // produce a larger value in decayed_major_gc_cost so protect
346 // against that.
347 return MIN2(major_gc_cost_average, decayed_major_gc_cost);
348 }
349
350 // Use a value of the major gc cost that has been decayed
351 // by the factor
352 //
353 // average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale /
354 // time-since-last-major-gc
355 //
356 // if the average-interval-between-major-gc * AdaptiveSizeMajorGCDecayTimeScale
357 // is less than time-since-last-major-gc.
358 //
359 // In cases where there are initial major gc's that
360 // are of a relatively high cost but no later major
361 // gc's, the total gc cost can remain high because
362 // the major gc cost remains unchanged (since there are no major
363 // gc's). In such a situation the value of the unchanging
364 // major gc cost can keep the mutator throughput below
365 // the goal when in fact the major gc cost is becoming diminishingly
366 // small. Use the decaying gc cost only to decide whether to
367 // adjust for throughput. Using it also to determine the adjustment
368 // to be made for throughput also seems reasonable but there is
369 // no test case to use to decide if it is the right thing to do
370 // don't do it yet.
371
// Total GC cost (capped at 1.0) using a decayed major gc cost when the
// last major GC is sufficiently far in the past.  See the block comment
// above for the rationale.
double AdaptiveSizePolicy::decaying_gc_cost() const {
  double decayed_major_gc_cost = major_gc_cost();
  double avg_major_interval = major_gc_interval_average_for_decay();
  // Decay only applies when enabled, the time scale is positive, and at
  // least one major interval has been observed.
  if (UseAdaptiveSizeDecayMajorGCCost &&
      (AdaptiveSizeMajorGCDecayTimeScale > 0) &&
      (avg_major_interval > 0.00)) {
    double time_since_last_major_gc = time_since_major_gc();

    // Decay the major gc cost?  Only once the elapsed time exceeds the
    // scaled average interval between major collections.
    if (time_since_last_major_gc >
        ((double) AdaptiveSizeMajorGCDecayTimeScale) * avg_major_interval) {

      // Decay using the time-since-last-major-gc
      decayed_major_gc_cost = decaying_major_gc_cost();
      log_trace(gc, ergo)("decaying_gc_cost: major interval average: %f  time since last major gc: %f",
                          avg_major_interval, time_since_last_major_gc);
      log_trace(gc, ergo)("  major gc cost: %f  decayed major gc cost: %f",
                          major_gc_cost(), decayed_major_gc_cost);
    }
  }
  double result = MIN2(1.0, decayed_major_gc_cost + minor_gc_cost());
  return result;
}
395
396
// Reset all the flags recording which sizing decisions were made for the
// current collection; called before a new round of decisions.
void AdaptiveSizePolicy::clear_generation_free_space_flags() {
  set_change_young_gen_for_min_pauses(0);
  set_change_old_gen_for_maj_pauses(0);

  set_change_old_gen_for_throughput(0);
  set_change_young_gen_for_throughput(0);
  set_decrease_for_footprint(0);
  set_decide_at_full_gc(0);
}
406
check_gc_overhead_limit(size_t young_live,size_t eden_live,size_t max_old_gen_size,size_t max_eden_size,bool is_full_gc,GCCause::Cause gc_cause,SoftRefPolicy * soft_ref_policy)407 void AdaptiveSizePolicy::check_gc_overhead_limit(
408 size_t young_live,
409 size_t eden_live,
410 size_t max_old_gen_size,
411 size_t max_eden_size,
412 bool is_full_gc,
413 GCCause::Cause gc_cause,
414 SoftRefPolicy* soft_ref_policy) {
415
416 // Ignore explicit GC's. Exiting here does not set the flag and
417 // does not reset the count. Updating of the averages for system
418 // GC's is still controlled by UseAdaptiveSizePolicyWithSystemGC.
419 if (GCCause::is_user_requested_gc(gc_cause) ||
420 GCCause::is_serviceability_requested_gc(gc_cause)) {
421 return;
422 }
423 // eden_limit is the upper limit on the size of eden based on
424 // the maximum size of the young generation and the sizes
425 // of the survivor space.
426 // The question being asked is whether the gc costs are high
427 // and the space being recovered by a collection is low.
428 // free_in_young_gen is the free space in the young generation
429 // after a collection and promo_live is the free space in the old
430 // generation after a collection.
431 //
432 // Use the minimum of the current value of the live in the
433 // young gen or the average of the live in the young gen.
434 // If the current value drops quickly, that should be taken
435 // into account (i.e., don't trigger if the amount of free
436 // space has suddenly jumped up). If the current is much
437 // higher than the average, use the average since it represents
438 // the longer term behavior.
439 const size_t live_in_eden =
440 MIN2(eden_live, (size_t) avg_eden_live()->average());
441 const size_t free_in_eden = max_eden_size > live_in_eden ?
442 max_eden_size - live_in_eden : 0;
443 const size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
444 const size_t total_free_limit = free_in_old_gen + free_in_eden;
445 const size_t total_mem = max_old_gen_size + max_eden_size;
446 const double mem_free_limit = total_mem * (GCHeapFreeLimit/100.0);
447 const double mem_free_old_limit = max_old_gen_size * (GCHeapFreeLimit/100.0);
448 const double mem_free_eden_limit = max_eden_size * (GCHeapFreeLimit/100.0);
449 const double gc_cost_limit = GCTimeLimit/100.0;
450 size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());
451 // But don't force a promo size below the current promo size. Otherwise,
452 // the promo size will shrink for no good reason.
453 promo_limit = MAX2(promo_limit, _promo_size);
454
455
456 log_trace(gc, ergo)(
457 "PSAdaptiveSizePolicy::check_gc_overhead_limit:"
458 " promo_limit: " SIZE_FORMAT
459 " max_eden_size: " SIZE_FORMAT
460 " total_free_limit: " SIZE_FORMAT
461 " max_old_gen_size: " SIZE_FORMAT
462 " max_eden_size: " SIZE_FORMAT
463 " mem_free_limit: " SIZE_FORMAT,
464 promo_limit, max_eden_size, total_free_limit,
465 max_old_gen_size, max_eden_size,
466 (size_t) mem_free_limit);
467
468 bool print_gc_overhead_limit_would_be_exceeded = false;
469 if (is_full_gc) {
470 if (gc_cost() > gc_cost_limit &&
471 free_in_old_gen < (size_t) mem_free_old_limit &&
472 free_in_eden < (size_t) mem_free_eden_limit) {
473 // Collections, on average, are taking too much time, and
474 // gc_cost() > gc_cost_limit
475 // we have too little space available after a full gc.
476 // total_free_limit < mem_free_limit
477 // where
478 // total_free_limit is the free space available in
479 // both generations
480 // total_mem is the total space available for allocation
481 // in both generations (survivor spaces are not included
482 // just as they are not included in eden_limit).
483 // mem_free_limit is a fraction of total_mem judged to be an
484 // acceptable amount that is still unused.
485 // The heap can ask for the value of this variable when deciding
486 // whether to thrown an OutOfMemory error.
487 // Note that the gc time limit test only works for the collections
488 // of the young gen + tenured gen and not for collections of the
489 // permanent gen. That is because the calculation of the space
490 // freed by the collection is the free space in the young gen +
491 // tenured gen.
492 // At this point the GC overhead limit is being exceeded.
493 inc_gc_overhead_limit_count();
494 if (UseGCOverheadLimit) {
495 if (gc_overhead_limit_count() >=
496 AdaptiveSizePolicyGCTimeLimitThreshold){
497 // All conditions have been met for throwing an out-of-memory
498 set_gc_overhead_limit_exceeded(true);
499 // Avoid consecutive OOM due to the gc time limit by resetting
500 // the counter.
501 reset_gc_overhead_limit_count();
502 } else {
503 // The required consecutive collections which exceed the
504 // GC time limit may or may not have been reached. We
505 // are approaching that condition and so as not to
506 // throw an out-of-memory before all SoftRef's have been
507 // cleared, set _should_clear_all_soft_refs in CollectorPolicy.
508 // The clearing will be done on the next GC.
509 bool near_limit = gc_overhead_limit_near();
510 if (near_limit) {
511 soft_ref_policy->set_should_clear_all_soft_refs(true);
512 log_trace(gc, ergo)("Nearing GC overhead limit, will be clearing all SoftReference");
513 }
514 }
515 }
516 // Set this even when the overhead limit will not
517 // cause an out-of-memory. Diagnostic message indicating
518 // that the overhead limit is being exceeded is sometimes
519 // printed.
520 print_gc_overhead_limit_would_be_exceeded = true;
521
522 } else {
523 // Did not exceed overhead limits
524 reset_gc_overhead_limit_count();
525 }
526 }
527
528 if (UseGCOverheadLimit) {
529 if (gc_overhead_limit_exceeded()) {
530 log_trace(gc, ergo)("GC is exceeding overhead limit of " UINTX_FORMAT "%%", GCTimeLimit);
531 reset_gc_overhead_limit_count();
532 } else if (print_gc_overhead_limit_would_be_exceeded) {
533 assert(gc_overhead_limit_count() > 0, "Should not be printing");
534 log_trace(gc, ergo)("GC would exceed overhead limit of " UINTX_FORMAT "%% %d consecutive time(s)",
535 GCTimeLimit, gc_overhead_limit_count());
536 }
537 }
538 }
539 // Printing
540
print() const541 bool AdaptiveSizePolicy::print() const {
542 assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy need to be enabled.");
543
544 if (!log_is_enabled(Debug, gc, ergo)) {
545 return false;
546 }
547
548 // Print goal for which action is needed.
549 char* action = NULL;
550 bool change_for_pause = false;
551 if ((change_old_gen_for_maj_pauses() ==
552 decrease_old_gen_for_maj_pauses_true) ||
553 (change_young_gen_for_min_pauses() ==
554 decrease_young_gen_for_min_pauses_true)) {
555 action = (char*) " *** pause time goal ***";
556 change_for_pause = true;
557 } else if ((change_old_gen_for_throughput() ==
558 increase_old_gen_for_throughput_true) ||
559 (change_young_gen_for_throughput() ==
560 increase_young_gen_for_througput_true)) {
561 action = (char*) " *** throughput goal ***";
562 } else if (decrease_for_footprint()) {
563 action = (char*) " *** reduced footprint ***";
564 } else {
565 // No actions were taken. This can legitimately be the
566 // situation if not enough data has been gathered to make
567 // decisions.
568 return false;
569 }
570
571 // Pauses
572 // Currently the size of the old gen is only adjusted to
573 // change the major pause times.
574 char* young_gen_action = NULL;
575 char* tenured_gen_action = NULL;
576
577 char* shrink_msg = (char*) "(attempted to shrink)";
578 char* grow_msg = (char*) "(attempted to grow)";
579 char* no_change_msg = (char*) "(no change)";
580 if (change_young_gen_for_min_pauses() ==
581 decrease_young_gen_for_min_pauses_true) {
582 young_gen_action = shrink_msg;
583 } else if (change_for_pause) {
584 young_gen_action = no_change_msg;
585 }
586
587 if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
588 tenured_gen_action = shrink_msg;
589 } else if (change_for_pause) {
590 tenured_gen_action = no_change_msg;
591 }
592
593 // Throughput
594 if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
595 assert(change_young_gen_for_throughput() ==
596 increase_young_gen_for_througput_true,
597 "Both generations should be growing");
598 young_gen_action = grow_msg;
599 tenured_gen_action = grow_msg;
600 } else if (change_young_gen_for_throughput() ==
601 increase_young_gen_for_througput_true) {
602 // Only the young generation may grow at start up (before
603 // enough full collections have been done to grow the old generation).
604 young_gen_action = grow_msg;
605 tenured_gen_action = no_change_msg;
606 }
607
608 // Minimum footprint
609 if (decrease_for_footprint() != 0) {
610 young_gen_action = shrink_msg;
611 tenured_gen_action = shrink_msg;
612 }
613
614 log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
615 log_debug(gc, ergo)(" GC overhead (%%)");
616 log_debug(gc, ergo)(" Young generation: %7.2f\t %s",
617 100.0 * avg_minor_gc_cost()->average(), young_gen_action);
618 log_debug(gc, ergo)(" Tenured generation: %7.2f\t %s",
619 100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
620 return true;
621 }
622
print_tenuring_threshold(uint new_tenuring_threshold_arg) const623 void AdaptiveSizePolicy::print_tenuring_threshold( uint new_tenuring_threshold_arg) const {
624 // Tenuring threshold
625 if (decrement_tenuring_threshold_for_survivor_limit()) {
626 log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to avoid survivor space overflow) = %u", new_tenuring_threshold_arg);
627 } else if (decrement_tenuring_threshold_for_gc_cost()) {
628 log_debug(gc, ergo)("Tenuring threshold: (attempted to decrease to balance GC costs) = %u", new_tenuring_threshold_arg);
629 } else if (increment_tenuring_threshold_for_gc_cost()) {
630 log_debug(gc, ergo)("Tenuring threshold: (attempted to increase to balance GC costs) = %u", new_tenuring_threshold_arg);
631 } else {
632 assert(!tenuring_threshold_change(), "(no change was attempted)");
633 }
634 }
635