/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"

ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid kind");
  return (ZCollectedHeap*)heap;
}

ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZGCName;
}

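// If the heap failed to initialize, report JNI_ENOMEM so that
// JVM creation fails cleanly instead of continuing without a heap.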
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart(),
                             (HeapWord*)ZAddressReservedEnd());

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}

CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_scavengable(oop obj) {
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _heap.is_in((uintptr_t)p);
}

bool ZCollectedHeap::is_in_closed_subset(const void* p) const {
  return is_in(p);
}

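// TLAB sizes arrive in heap words; convert to bytes before asking ZHeap.
// A failed allocation returns 0, in which case *actual_size is left
// untouched and NULL is handed back to the caller.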
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

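// The gc_overhead_limit_was_exceeded flag is not used here; allocation
// failure is signalled by a NULL return.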
HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

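// Metadata allocation failures are handled by escalating: first an
// asynchronous GC plus metaspace expansion, then a synchronous GC that
// also clears soft references, before finally giving up and returning NULL.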
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither of them really needs a GC to happen, but the result of their
  // heap iterations might in that case be less accurate, since they might
  // include objects that would otherwise have been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

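// ZGC exposes a single memory manager and a single memory pool
// through the serviceability (JMX) support.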
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_referents */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

size_t ZCollectedHeap::block_size(const HeapWord* addr) const {
  size_t size_in_bytes = _heap.block_size((uintptr_t)addr);
  return ZUtils::bytes_to_words(size_in_bytes);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

void ZCollectedHeap::keep_alive(oop obj) {
  _heap.keep_alive(obj);
}

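// nmethod bookkeeping is delegated to ZNMethodTable. Callers must hold
// the CodeCache_lock or be at a safepoint.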
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  ZNMethodTable::unregister_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

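// Time since the last GC cycle, converted to milliseconds.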
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

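// Apply the closure to all GC-related threads: the director, driver and
// stat threads, the heap's worker threads, and the runtime workers.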
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

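// Summarize the heap as a single virtual space: committed size equals the
// current capacity and reserved size equals the maximum capacity, both
// expressed in heap words from the start of the reserved region.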
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr("     Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr("     End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr("     Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr("     GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr("     GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr("     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr("     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr("     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr("     Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr("     Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr("     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr("     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr("     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}