/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zUncommitter.hpp"
#include "gc/z/zUnmapper.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

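// Represents a single in-flight page allocation request. It records the
// requested type, size and flags, collects any pages flushed from the page
// cache on its behalf, and carries the future used to block and later wake
// a stalled allocating thread.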
class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const uint32_t                _seqnum;
  size_t                        _flushed;
  size_t                        _committed;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _seqnum(ZGlobalSeqNum),
      _flushed(0),
      _committed(0),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  uint32_t seqnum() const {
    return _seqnum;
  }

  size_t flushed() const {
    return _flushed;
  }

  void set_flushed(size_t flushed) {
    _flushed = flushed;
  }

  size_t committed() const {
    return _committed;
  }

  void set_committed(size_t committed) {
    _committed = committed;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

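// Set up the page allocator with the given capacity bounds. Initialization
// fails if the virtual or physical memory managers could not be set up, or
// if the initial capacity could not be committed and mapped by prime_cache().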
ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _claimed(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _allocated(0),
    _reclaimed(0),
    _stalled(),
    _satisfied(),
    _unmapper(new ZUnmapper(this)),
    _uncommitter(new ZUncommitter(this)),
    _safe_delete(),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info_p(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  if (ZPageSizeMedium > 0) {
    log_info_p(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
  } else {
    log_info_p(gc, init)("Medium Page Size: N/A");
  }
  log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should and can be enabled
  _physical.try_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

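// Task that pre-touches the physical memory backing a page, one granule at a
// time, so that the memory is populated by the OS before the page is used.
// Worker threads claim granules by atomically advancing the shared cursor.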
class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

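// Warm up the page cache by allocating a page covering the initial capacity,
// optionally pre-touching it, and then immediately freeing it. This leaves
// the initial capacity committed, mapped and sitting in the page cache.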
bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  ZAllocationFlags flags;

  flags.set_non_blocking();
  flags.set_low_address();

  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

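// Memory available for allocation without growing capacity: current capacity
// minus used, claimed (pending uncommit) and the allocation reserve. Computed
// with signed arithmetic and clamped at zero, since the sum can temporarily
// exceed capacity.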
size_t ZPageAllocator::unused() const {
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
  const ssize_t max_reserve = (ssize_t)_max_reserve;
  const ssize_t unused = capacity - used - claimed - max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

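// Try to grow capacity by the given amount, bounded by the current max
// capacity. Returns the number of bytes actually added, which may be zero.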
size_t ZPageAllocator::increase_capacity(size_t size) {
  const size_t increased = MIN2(size, _current_max_capacity - _capacity);

  if (increased > 0) {
    // Update atomically since we have concurrent readers
    Atomic::add(&_capacity, increased);

    // Record time of last commit. When allocating, we prefer increasing
    // the capacity over flushing the cache. That means there could be
    // expired pages in the cache at this time. However, since we are
    // increasing the capacity we are obviously in need of committed
    // memory and should therefore not be uncommitting memory.
    _cache.set_last_commit();
  }

  return increased;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error_p(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::add(&_used, size);
  if (used > _used_high) {
    _used_high = used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  } else {
    _allocated -= size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::sub(&_used, size);
  if (used < _used_low) {
    _used_low = used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  if (!ZUncommit) {
    return;
  }

  // Uncommit physical memory
  _physical.uncommit(page->physical_memory());
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  _physical.map(page->start(), page->physical_memory());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->start(), page->size());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

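// Check whether an allocation of the given size fits within the current max
// capacity, given what is already used and claimed, optionally excluding the
// reserve from the available memory.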
bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
  size_t available = _current_max_capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available >= size;
}

bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
  size_t available = _capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  } else if (_capacity != _current_max_capacity) {
    // Always increase capacity before using the reserve
    return false;
  }

  return available >= size;
}

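// Try to satisfy an allocation within the current limits: prefer pages from
// the page cache, then increase capacity, and finally flush the page cache
// to cover any remainder that the capacity increase could not provide.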
bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size, no_reserve)) {
    // Out of memory
    return false;
  }

  // Try to allocate from the page cache
  if (is_alloc_allowed_from_cache(size, no_reserve)) {
    ZPage* const page = _cache.alloc_page(type, size);
    if (page != NULL) {
      // Success
      pages->insert_last(page);
      return true;
    }
  }

  // Try to increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}

bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  // Try to allocate without using the reserve
  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
    // If allowed to, try to allocate using the reserve
    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
      // Out of memory
      return false;
    }
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

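// Block the allocating thread until the stalled allocation is satisfied or
// fails. Repeatedly requests an asynchronous GC while the response is
// ZPageAllocationStallStartGC, and returns true only on success.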
bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

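// Try to allocate under the allocator lock. If that fails and the request is
// allowed to block, enqueue it on the stalled list and wait for it to be
// satisfied, failed, or retried after a GC.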
bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory. To make error handling a lot more
  // straightforward, we allocate virtual memory before destroying flushed
  // pages. Flushed pages are also unmapped and destroyed asynchronously, so
  // we can't immediately reuse that part of the address space anyway.
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Harvest physical memory from flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();

    // Harvest flushed physical memory
    ZPhysicalMemory& fmem = page->physical_memory();
    pmem.add_segments(fmem);
    fmem.remove_segments();

    // Unmap and destroy page
    _unmapper->unmap_and_destroy_page(page);
  }

  if (flushed > 0) {
    allocation->set_flushed(flushed);

    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory. Capacity and used have
  // already been adjusted; we just need to fetch the memory, which
  // is guaranteed to succeed.
  if (flushed < size) {
    const size_t remaining = size - flushed;
    allocation->set_committed(remaining);
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}

static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that was requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (commit_page(page)) {
    // Success
    map_page(page);
    return page;
  }

  // Failed or partially failed. Split off any successfully committed
  // part of the page into a new page and insert it into the list of pages,
  // so that it will be re-inserted into the page cache.
  ZPage* const committed_page = page->split_committed();
  destroy_page(page);

  if (committed_page != NULL) {
    map_page(committed_page);
    allocation->pages()->insert_last(committed_page);
  }

  return NULL;
}

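// Undo a failed allocation: return any flushed or newly committed pages to
// the page cache, roll back the part of the capacity increase that could not
// be committed, and lower the current max capacity so we stop trying to grow
// past what the system can provide.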
void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  ZLocker<ZLock> locker(&_lock);

  size_t freed = 0;

  // Free any allocated/flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page_inner(page, false /* reclaimed */);
  }

  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  EventZPageAllocation event;

retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then the finalize phase is allowed to allocate the remaining memory
  // directly from the physical memory manager. Note that this call might
  // block in a safepoint if the non-blocking flag is not set.
  if (!alloc_page_or_stall(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_finalize(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after we potentially blocked in a safepoint (stalled)
  // where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  // Send event
  event.commit(type, size, allocation.flushed(), allocation.committed(),
               page->physical_memory().nsegments(), flags.non_blocking(), flags.no_reserve());

  return page;
}

void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page_inner(ZPage* page, bool reclaimed) {
  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Free page
  free_page_inner(page, reclaimed);

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit(uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit the reserve, and never uncommit below min capacity. We flush
    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
    // one granule and at most 256M), in case demand for memory increases while we
    // are uncommitting.
    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, &pages, timeout);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Record flushed pages as claimed
    Atomic::add(&_claimed, flushed);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust claimed and capacity to reflect the uncommit
    Atomic::sub(&_claimed, flushed);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->start(), page->physical_memory());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->start(), page->size());
}

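// Apply the closure to all pages known to the allocator: pages attached to
// satisfied allocation requests that have not yet been picked up by the
// allocating thread, and pages currently in the page cache.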
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
    ZListIterator<ZPage> iter_pages(allocation->pages());
    for (ZPage* page; iter_pages.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_stalled.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->seqnum() == ZGlobalSeqNum) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}

void ZPageAllocator::threads_do(ThreadClosure* tc) const {
  tc->do_thread(_unmapper);
  tc->do_thread(_uncommitter);
}