/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.inline.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncommitter.hpp"
#include "gc/z/zUnmapper.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

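// Possible outcomes for a stalled page allocation request: the request was
// satisfied, it failed permanently, or the waiter should trigger another GC
// cycle and wait again.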
enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

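// Tracks a single in-flight page allocation request: the requested type, size
// and flags, the pages flushed from the page cache to satisfy it, and a future
// that the allocating thread blocks on if the request stalls.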
class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const uint32_t                _seqnum;
  size_t                        _flushed;
  size_t                        _committed;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _seqnum(ZGlobalSeqNum),
      _flushed(0),
      _committed(0),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  uint32_t seqnum() const {
    return _seqnum;
  }

  size_t flushed() const {
    return _flushed;
  }

  void set_flushed(size_t flushed) {
    _flushed = flushed;
  }

  size_t committed() const {
    return _committed;
  }

  void set_committed(size_t committed) {
    _committed = committed;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _claimed(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _reclaimed(0),
    _stalled(),
    _nstalled(0),
    _satisfied(),
    _unmapper(new ZUnmapper(this)),
    _uncommitter(new ZUncommitter(this)),
    _safe_delete(),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  if (ZPageSizeMedium > 0) {
    log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
  } else {
    log_info_p(gc, init)("Medium Page Size: N/A");
  }
  log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should and can be enabled
  _physical.try_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

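// Task that pre-touches a committed memory range. Worker threads claim one
// ZGranuleSize-sized chunk at a time by atomically advancing _start until the
// end of the range is reached.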
class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  ZAllocationFlags flags;

  flags.set_non_blocking();
  flags.set_low_address();

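  // Allocate a single large page covering the initial capacity and free it
  // again below, leaving the memory committed, mapped and cached.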
  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_all(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

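// Committed memory that is neither used nor claimed for uncommit. The three
// counters are loaded without holding the lock, so the difference can be
// transiently negative and is therefore clamped to zero.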
size_t ZPageAllocator::unused() const {
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
  const ssize_t unused = capacity - used - claimed;
  return unused > 0 ? (size_t)unused : 0;
}

ZPageAllocatorStats ZPageAllocator::stats() const {
  ZLocker<ZLock> locker(&_lock);
  return ZPageAllocatorStats(_min_capacity,
                             _max_capacity,
                             soft_max_capacity(),
                             _capacity,
                             _used,
                             _used_high,
                             _used_low,
                             _reclaimed);
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _reclaimed = 0;
  _used_high = _used_low = _used;
  _nstalled = 0;
}

size_t ZPageAllocator::increase_capacity(size_t size) {
  const size_t increased = MIN2(size, _current_max_capacity - _capacity);

  if (increased > 0) {
    // Update atomically since we have concurrent readers
    Atomic::add(&_capacity, increased);

    // Record time of last commit. When allocating, we prefer increasing
    // the capacity over flushing the cache. That means there could be
    // expired pages in the cache at this time. However, since we are
    // increasing the capacity we are obviously in need of committed
    // memory and should therefore not be uncommitting memory.
    _cache.set_last_commit();
  }

  return increased;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error_p(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool worker_relocation) {
  if (worker_relocation) {
    // Allocating a page for the purpose of worker relocation has
    // a negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::add(&_used, size);
  if (used > _used_high) {
    _used_high = used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::sub(&_used, size);
  if (used < _used_low) {
    _used_low = used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  if (!ZUncommit) {
    return;
  }

  // Uncommit physical memory
  _physical.uncommit(page->physical_memory());
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  _physical.map(page->start(), page->physical_memory());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->start(), page->size());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

bool ZPageAllocator::is_alloc_allowed(size_t size) const {
  const size_t available = _current_max_capacity - _used - _claimed;
  return available >= size;
}

bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size)) {
    // Out of memory
    return false;
  }

  // Try to allocate from the page cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    // Success
    pages->insert_last(page);
    return true;
  }

  // Try to increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}

bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  if (!alloc_page_common_inner(type, size, pages)) {
    // Out of memory
    return false;
  }

  // Update used statistics
  increase_used(size, flags.worker_relocation());

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  // Increment stalled counter
  Atomic::inc(&_nstalled);

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

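// Slow path: construct a new page for the allocation by combining physical
// memory harvested from flushed pages with any remaining physical memory
// allocated directly, backed by a newly allocated virtual address range.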
ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory. To make error handling a lot more
  // straightforward, we allocate virtual memory before destroying flushed
  // pages. Flushed pages are also unmapped and destroyed asynchronously,
  // so we can't immediately reuse that part of the address space anyway.
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Harvest physical memory from flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();

    // Harvest flushed physical memory
    ZPhysicalMemory& fmem = page->physical_memory();
    pmem.add_segments(fmem);
    fmem.remove_segments();

    // Unmap and destroy page
    _unmapper->unmap_and_destroy_page(page);
  }

  if (flushed > 0) {
    allocation->set_flushed(flushed);

    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory. Capacity and used have
  // already been adjusted; we just need to fetch the memory, which
  // is guaranteed to succeed.
  if (flushed < size) {
    const size_t remaining = size - flushed;
    allocation->set_committed(remaining);
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}

static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that was requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (commit_page(page)) {
    // Success
    map_page(page);
    return page;
  }

  // Failed or partially failed. Split off any successfully committed
  // part of the page into a new page and insert it into the list of pages,
  // so that it will be re-inserted into the page cache.
  ZPage* const committed_page = page->split_committed();
  destroy_page(page);

  if (committed_page != NULL) {
    map_page(committed_page);
    allocation->pages()->insert_last(committed_page);
  }

  return NULL;
}

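// Called when a page allocation could not be committed or mapped. Returns the
// allocated and flushed pages to the page cache, undoes the remaining part of
// the capacity and used increase, and lowers the current max capacity to
// avoid further attempts to commit memory that the system cannot provide.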
void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  ZLocker<ZLock> locker(&_lock);

  size_t freed = 0;

  // Free any allocated/flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page_inner(page, false /* reclaimed */);
  }

  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  EventZPageAllocation event;

retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then the finalize phase is allowed to allocate the remaining memory
  // directly from the physical memory manager. Note that this call might
  // block in a safepoint if the non-blocking flag is not set.
  if (!alloc_page_or_stall(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_finalize(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after we potentially blocked in a safepoint (stalled)
  // where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker relocations to avoid
  // artificial inflation of the allocation rate during relocation.
  if (!flags.worker_relocation() && is_init_completed()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  // Send event
  event.commit(type, size, allocation.flushed(), allocation.committed(),
               page->physical_memory().nsegments(), flags.non_blocking());

  return page;
}

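// Retries stalled allocation requests in FIFO order until one cannot be
// satisfied. Called with the allocator lock held, after pages have been
// freed or a failed allocation has been undone.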
void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) {
  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Free page
  free_page_inner(page, reclaimed);

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Free pages
  ZArrayIterator<ZPage*> iter(pages);
  for (ZPage* page; iter.next(&page);) {
    free_page_inner(page, reclaimed);
  }

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit(uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit below min capacity. We flush out and uncommit one chunk at
    // a time (~0.8% of the max capacity, but at least one granule and at most
    // 256M), in case demand for memory increases while we are uncommitting.
    const size_t retain = MAX2(_used, _min_capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, &pages, timeout);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Record flushed pages as claimed
    Atomic::add(&_claimed, flushed);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust claimed and capacity to reflect the uncommit
    Atomic::sub(&_claimed, flushed);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->start(), page->physical_memory());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->start(), page->size());
}

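// Visits all pages handed out to satisfied stalled allocations as well as all
// pages currently in the page cache. Must be called at a safepoint.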
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
    ZListIterator<ZPage> iter_pages(allocation->pages());
    for (ZPage* page; iter_pages.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::has_alloc_stalled() const {
  return Atomic::load(&_nstalled) != 0;
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->seqnum() == ZGlobalSeqNum) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}

void ZPageAllocator::threads_do(ThreadClosure* tc) const {
  tc->do_thread(_unmapper);
  tc->do_thread(_uncommitter);
}