/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1MONITORINGSUPPORT_HPP
#define SHARE_GC_G1_G1MONITORINGSUPPORT_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/generationCounters.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "runtime/mutex.hpp"

class CollectorCounters;
class G1CollectedHeap;
class HSpaceCounters;
class MemoryPool;

// Class for monitoring logical spaces in G1. It provides data for
// both G1's jstat counters as well as G1's memory pools.
//
// G1 splits the heap into heap regions and each heap region belongs
// to one of the following categories:
//
// * eden      : regions that have been allocated since the last GC
// * survivors : regions with objects that survived the last few GCs
// * old       : long-lived non-humongous regions
// * humongous : humongous regions
// * free      : free regions
//
// The combination of eden and survivor regions forms the equivalent of
// the young generation in the other GCs. The combination of old and
// humongous regions forms the equivalent of the old generation in the
// other GCs. Free regions do not have a good equivalent in the other
// GCs, given that they can be allocated as any of the other region types.
//
// The monitoring tools expect the heap to contain a number of
// generations (young, old, perm) and each generation to contain a
// number of spaces (young: eden, survivors, old). Given that G1 does
// not maintain those spaces physically (e.g., the set of
// non-contiguous eden regions can be considered as a "logical"
// space), we provide the illusion that those generations and
// spaces exist. In reality, each generation and space refers to a set
// of heap regions that are potentially non-contiguous.
//
// This class provides interfaces to access the min, current, and max
// capacity and current occupancy for each of G1's logical spaces and
// generations we expose to the monitoring tools. Also provided are
// counters for G1 concurrent collections and stop-the-world full heap
// collections.
//
// Below is a description of how the various sizes are calculated.
//
// * Current Capacity
//
//   - heap_capacity = current heap capacity (e.g., current committed size)
//   - young_gen_capacity = current max young gen target capacity
//     (i.e., young gen target capacity + max allowed expansion capacity)
//   - survivor_capacity = current survivor region capacity
//   - eden_capacity = young_gen_capacity - survivor_capacity
//   - old_capacity = heap_capacity - young_gen_capacity
//
//   What we do in the above is to distribute the free regions among
//   eden_capacity and old_capacity.
//
// * Occupancy
//
//   - young_gen_used = current young region capacity
//   - survivor_used = survivor_capacity
//   - eden_used = young_gen_used - survivor_used
//   - old_used = overall_used - young_gen_used
//
//   Unfortunately, we currently only keep track of the number of
//   currently allocated young and survivor regions plus the overall used
//   bytes in the heap, so the above can be a little inaccurate.
//
// * Min Capacity
//
//   We set this to 0 for all spaces.
//
// * Max Capacity
//
//   For jstat, we set the max capacity of all spaces to heap_capacity,
//   given that we don't always have a reasonable upper bound on how big
//   each space can grow. For the memory pools, we make the max
//   capacity undefined with the exception of the old memory pool, for
//   which we make the max capacity the same as the max heap capacity.
//
// If we had more accurate occupancy / capacity information per
// region set, the above calculations would be greatly simplified and
// made more accurate.
//
// We update all the above synchronously and we store the results in
// fields so that we just read said fields when needed. A subtle point
// is that all the above sizes need to be recalculated when the old
// gen changes capacity (after a GC or after a humongous allocation),
// but only the eden occupancy changes when a new eden region is
// allocated. So, in the latter case we have minimal recalculation to
// do, which is important as we want to keep the eden region allocation
// path as low-overhead as possible.
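//
// For illustration only (the numbers below are hypothetical and not taken
// from any particular configuration): with a 100 MB committed heap, a
// 30 MB young gen capacity of which 5 MB are survivor regions, and 45 MB
// used overall of which 20 MB are in young regions, the above yields:
//
//   eden_capacity = 30 MB  - 5 MB  = 25 MB
//   old_capacity  = 100 MB - 30 MB = 70 MB
//   eden_used     = 20 MB  - 5 MB  = 15 MB
//   old_used      = 45 MB  - 20 MB = 25 MB
//
// Note that eden_capacity + survivor_capacity + old_capacity adds up to
// the full 100 MB, i.e., the free regions are implicitly attributed to
// eden and old capacity as described above.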

class G1MonitoringSupport : public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class G1MonitoringScope;

  G1CollectedHeap* _g1h;

  // java.lang.management MemoryManager and MemoryPool support
  GCMemoryManager _incremental_memory_manager;
  GCMemoryManager _full_gc_memory_manager;

  MemoryPool* _eden_space_pool;
  MemoryPool* _survivor_space_pool;
  MemoryPool* _old_gen_pool;

  // jstat performance counters
  //  incremental collections both young and mixed
  CollectorCounters*   _incremental_collection_counters;
  //  full stop-the-world collections
  CollectorCounters*   _full_collection_counters;
  //  stop-the-world phases in G1
  CollectorCounters*   _conc_collection_counters;
  //  young collection set counters.  The _eden_counters,
  //  _from_counters, and _to_counters are associated with
  //  this "generational" counter.
  GenerationCounters*  _young_gen_counters;
  //  old collection set counters. The _old_space_counters
  //  below are associated with this "generational" counter.
  GenerationCounters*  _old_gen_counters;
  // Counters for the capacity and used for
  //   the whole heap
  HSpaceCounters*      _old_space_counters;
  //   the young collection
  HSpaceCounters*      _eden_space_counters;
  //   the survivor collection (only one, _to_counters, is actively used)
  HSpaceCounters*      _from_space_counters;
  HSpaceCounters*      _to_space_counters;

  // When it's appropriate to recalculate the various sizes (at the
  // end of a GC, when a new eden region is allocated, etc.) we store
  // them here so that we can easily report them when needed and not
  // have to recalculate them every time.

  size_t _overall_committed;
  size_t _overall_used;

  size_t _young_gen_committed;
  size_t _old_gen_committed;

  size_t _eden_space_committed;
  size_t _eden_space_used;
  size_t _survivor_space_committed;
  size_t _survivor_space_used;

  size_t _old_gen_used;

  // Recalculate all the sizes.
  void recalculate_sizes();

  void recalculate_eden_size();

public:
  G1MonitoringSupport(G1CollectedHeap* g1h);
  ~G1MonitoringSupport();

  void initialize_serviceability();

  MemoryUsage memory_usage();
  GrowableArray<GCMemoryManager*> memory_managers();
  GrowableArray<MemoryPool*> memory_pools();

  // Unfortunately, the jstat tool assumes that no space has 0
  // capacity. In our case, given that each space is logical, it's
  // possible that no regions will be allocated to it, hence for it to
  // have 0 capacity (e.g., if there are no survivor regions, the
  // survivor space has 0 capacity). The way we deal with this is to
  // always pad each capacity value we report to jstat by a very small
  // amount to make sure that it's never zero. Given that we sometimes
  // have to report the capacity of a generation that contains several
  // spaces (e.g., young gen includes one eden and two survivor spaces),
  // the mult parameter is provided in order to add the appropriate
  // padding multiple times so that the capacities add up correctly.
  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
    return size_bytes + MinObjAlignmentInBytes * mult;
  }
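
  // For example (illustrative only, not a reference to a particular call
  // site): a counter covering the whole young gen, which is made up of one
  // eden and two survivor spaces, would report
  // pad_capacity(young_gen_committed(), 3) so that the generation capacity
  // still adds up with the individually padded capacities of its three spaces.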

  // Recalculate all the sizes from scratch and update all the jstat
  // counters accordingly.
  void update_sizes();

  void update_eden_size();

  CollectorCounters* conc_collection_counters() {
    return _conc_collection_counters;
  }

  // Monitoring support used by
  //   MemoryService
  //   jstat counters
  //   Tracing
  // Values may not be consistent with respect to each other.

  size_t young_gen_committed()    { return _young_gen_committed; }

  size_t eden_space_used()        { return _eden_space_used; }
  size_t survivor_space_used()    { return _survivor_space_used; }

  size_t old_gen_committed()      { return _old_gen_committed; }
  size_t old_gen_used()           { return _old_gen_used; }

  // Monitoring support for MemoryPools. Values in the returned MemoryUsage are
  // guaranteed to be consistent with each other.
  MemoryUsage eden_space_memory_usage(size_t initial_size, size_t max_size);
  MemoryUsage survivor_space_memory_usage(size_t initial_size, size_t max_size);

  MemoryUsage old_gen_memory_usage(size_t initial_size, size_t max_size);
};

// Scope object for java.lang.management support.
class G1MonitoringScope : public StackObj {
  TraceCollectorStats _tcs;
  TraceMemoryManagerStats _tms;
public:
  G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected);
};

#endif // SHARE_GC_G1_G1MONITORINGSUPPORT_HPP
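
// Usage sketch for G1MonitoringScope (illustrative only; the variable names
// and the exact arguments are assumptions, not taken from the actual call
// sites in the collector):
//
//   {
//     // Report this pause to the jstat collector counters and to the
//     // java.lang.management memory managers for the scope's duration.
//     G1MonitoringScope ms(g1mm, false /* full_gc */, true /* all_memory_pools_affected */);
//     ... perform the young/mixed collection ...
//   }
//   // Afterwards, recompute the cached sizes and jstat counters, since the
//   // old gen capacity may have changed.
//   g1mm->update_sizes();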