/*
 * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),
                             (HeapWord*)_virtual_space.high());

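  // Record the reserved region in the CollectedHeap superclass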
  initialize_reserved_region(heap_rs);

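  // Epsilon manages the entire heap as a single contiguous space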
  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual allocation size. Note: result should be in bytes.
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
}


HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = NULL;
  while (true) {
    // Try to allocate, assume space is available
    res = _space->par_allocate(size);
    if (res != NULL) {
      break;
    }

    // Allocation failed, attempt expansion, and retry:
    {
      MutexLocker ml(Heap_lock);

      // Try to allocate under the lock, assume another thread was able to expand
      res = _space->par_allocate(size);
      if (res != NULL) {
        break;
      }

      // Expand and loop back if space is available
      size_t space_left = max_capacity() - capacity();
      size_t want_space = MAX2(size, EpsilonMinHeapExpand);

      if (want_space < space_left) {
        // Enough space to expand in bulk:
        bool expand = _virtual_space.expand_by(want_space);
        assert(expand, "Should be able to expand");
      } else if (size < space_left) {
        // No space to expand in bulk, and this allocation is still possible,
        // take all the remaining space:
        bool expand = _virtual_space.expand_by(space_left);
        assert(expand, "Should be able to expand");
      } else {
        // No space left:
        return NULL;
      }

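      // Expansion succeeded: move the space end to cover the newly committed memory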
      _space->set_end((HeapWord *) _virtual_space.high());
    }
  }

  size_t used = _space->used();

  // Allocation successful, update counters
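  // Only the thread that wins the CAS publishes, which throttles counter
  // updates to roughly once per _step_counter_update bytes of allocation.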
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

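// Non-TLAB allocation path: delegate to allocate_work(). Epsilon never starts
// a GC on allocation failure, so the overhead limit is never reported as exceeded.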
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

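// Epsilon does not do full collections; treat the request like any other GC cause.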
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  if (_space != NULL) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}