/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zPreMappedMemory.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/init.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

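// A page allocation request that could not be satisfied immediately. The
// allocating thread enqueues the request and blocks on its future until it
// is satisfied with a page, with NULL (out of memory), or with the
// gc_marker sentinel, which tells the thread to start another GC cycle
// and wait again.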
class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

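// Sentinel value used to satisfy a blocked allocation request without
// failing it, telling the waiting thread to start another GC cycle and
// wait again (see check_out_of_memory() and alloc_page_blocking()).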
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(max_capacity, ZPageSizeMin),
    _cache(),
    _max_reserve(max_reserve),
    _pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _detached() {}

bool ZPageAllocator::is_initialized() const {
  return _physical.is_initialized() &&
         _virtual.is_initialized() &&
         _pre_mapped.is_initialized();
}

size_t ZPageAllocator::max_capacity() const {
  return _physical.max_capacity();
}

size_t ZPageAllocator::current_max_capacity() const {
  return _physical.current_max_capacity();
}

size_t ZPageAllocator::capacity() const {
  return _physical.capacity();
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

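// _reclaimed is signed and can go temporarily negative when pages are
// allocated for relocation (see increase_used()), so never report a
// negative number of reclaimed bytes.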
size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  if (reclaimed) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is typically true when
    // a worker releases a page after relocation, and is typically
    // false when we release a page to undo an allocation.
    _reclaimed += size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = current_max_capacity() - used();

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, max_reserve());
  }

  return available;
}

size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) {
  // Ensure that we always have space available for the reserve. This
  // is needed to avoid losing the reserve because of failure to map
  // more memory before reaching max capacity.
  _physical.try_ensure_unused_capacity(size + max_reserve());

  size_t unused = _physical.unused_capacity();

  if (no_reserve) {
    // The reserve should not be considered unused
    unused -= MIN2(unused, max_reserve());
  }

  return MIN2(size, unused);
}

size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) {
  // This function is called during construction, where the
  // physical memory manager might have failed to initialize.
  if (!_physical.is_initialized()) {
    return 0;
  }

  return try_ensure_unused(size, true /* no_reserve */);
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  if (pmem.is_null()) {
    // Out of memory
    return NULL;
  }

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    _physical.free(pmem);
    return NULL;
  }

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

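// Detach the remaining pre-mapped memory so that its physical memory can
// be used to back regular page allocations.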
void ZPageAllocator::flush_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    return;
  }

  // Detach the memory mapping.
  detach_memory(_pre_mapped.virtual_memory(), _pre_mapped.physical_memory());

  _pre_mapped.clear();
}

void ZPageAllocator::map_page(ZPage* page) {
  // Map physical memory
  _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::detach_page(ZPage* page) {
  // Detach the memory mapping.
  detach_memory(page->virtual_memory(), page->physical_memory());

  // Add to list of detached pages
  _detached.insert_last(page);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  assert(page->is_detached(), "Invalid page state");

  // Free virtual memory
  {
    ZLocker locker(&_lock);
    _virtual.free(page->virtual_memory());
  }

  delete page;
}

void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
  ZLocker locker(&_lock);
  list->transfer(&_detached);
}

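// Evict up to the requested number of bytes from the page cache and detach
// the evicted pages, making their physical memory available for reuse.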
void ZPageAllocator::flush_cache(size_t size) {
  ZList<ZPage> list;

  _cache.flush(&list, size);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    detach_page(page);
  }
}

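// Blocking on an allocation stall requires a fully initialized VM. If the
// VM is still initializing, treat the allocation failure as a fatal error
// caused by a too small Java heap.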
void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
  const size_t max = max_available(flags.no_reserve());
  if (max < size) {
    // Not enough free memory
    return NULL;
  }

  // Try allocating from the page cache
  ZPage* const cached_page = _cache.alloc_page(type, size);
  if (cached_page != NULL) {
    return cached_page;
  }

  // Try allocating from the pre-mapped memory
  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
  if (pre_mapped_page != NULL) {
    return pre_mapped_page;
  }

  // Flush any remaining pre-mapped memory so that
  // subsequent allocations can use the physical memory.
  flush_pre_mapped();

  // Try to ensure that physical memory is available
  const size_t unused = try_ensure_unused(size, flags.no_reserve());
  if (unused < size) {
    // Flush cache to free up more physical memory
    flush_cache(size - unused);
  }

  // Create new page and allocate physical memory
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}

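// Blocking allocation: try a normal allocation under the lock and, if that
// fails, enqueue a request, start an asynchronous GC, and wait for the
// request to be satisfied by satisfy_alloc_queue() or check_out_of_memory().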
ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if the VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker locker(&_lock);
    }
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  if (!page->is_mapped()) {
    map_page(page);
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

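// Called with _lock held. Satisfy as many queued allocation requests as
// possible, in FIFO order, now that memory may have become available.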
void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    request->satisfy(page);
  }
}

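// Unmap and free the physical memory behind a virtual memory range. The
// virtual memory itself is freed separately (see destroy_page()).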
void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem) {
  const uintptr_t addr = vmem.start();

  // Unmap physical memory
  _physical.unmap(pmem, addr);

  // Free physical memory
  _physical.free(pmem);

  // Clear physical mapping
  pmem.clear();
}

void ZPageAllocator::flip_page(ZPage* page) {
  const ZPhysicalMemory& pmem = page->physical_memory();
  const uintptr_t addr = page->start();

  // Flip physical mapping
  _physical.flip(pmem, addr);
}

void ZPageAllocator::flip_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    // Nothing to flip
    return;
  }

  const ZPhysicalMemory& pmem = _pre_mapped.physical_memory();
  const ZVirtualMemory& vmem = _pre_mapped.virtual_memory();

  // Flip physical mapping
  _physical.flip(pmem, vmem.start());
}

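// Return a page to the allocator: update the used statistics, cache the
// page for reuse, and try to satisfy any blocked allocation requests.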
void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Cache page
  _cache.free_page(page);

  // Try to satisfy blocked allocations
  satisfy_alloc_queue();
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove_first();
    request->satisfy(NULL);
  }
}