/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter ZCounterUndoObjectAllocationSucceeded("Memory", "Undo Object Allocation Succeeded", ZStatUnitOpsPerSecond);
static const ZStatCounter ZCounterUndoObjectAllocationFailed("Memory", "Undo Object Allocation Failed", ZStatUnitOpsPerSecond);

ZObjectAllocator::ZObjectAllocator(uint nworkers) :
    _nworkers(nworkers),
    _used(0),
    _shared_medium_page(NULL),
    _shared_small_page(NULL),
    _worker_small_page(NULL) {}

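// Allocates a page from the heap and, on success, adds the page size to
// this CPU's slice of the per-CPU used counter. The counter is only ever
// summed up in used(), so a CPU-local atomic add is sufficient here.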
ZPage* ZObjectAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
  if (page != NULL) {
    // Increment used bytes
    Atomic::add(size, _used.addr());
  }

  return page;
}

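// Allocates an object in a page shared by multiple threads. The fast path
// bumps the shared page's top pointer atomically. When the current page is
// full (or none is installed), a new page is allocated and the object is
// carved out of it before the page is published with a CAS. If another
// thread publishes a different page first, allocation is retried in that
// page and, on success, the speculatively allocated page is handed back
// to the heap.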
uintptr_t ZObjectAllocator::alloc_object_in_shared_page(ZPage** shared_page,
                                                        uint8_t page_type,
                                                        size_t page_size,
                                                        size_t size,
                                                        ZAllocationFlags flags) {
  uintptr_t addr = 0;
  ZPage* page = *shared_page;

  if (page != NULL) {
    addr = page->alloc_object_atomic(size);
  }

  if (addr == 0) {
    // Allocate new page
    ZPage* const new_page = alloc_page(page_type, page_size, flags);
    if (new_page != NULL) {
      // Allocate object before installing the new page
      addr = new_page->alloc_object(size);

    retry:
      // Install new page
      ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
      if (prev_page != page) {
        if (prev_page == NULL) {
          // Previous page was retired, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Another page already installed, try allocation there first
        const uintptr_t prev_addr = prev_page->alloc_object_atomic(size);
        if (prev_addr == 0) {
          // Allocation failed, retry installing the new page
          page = prev_page;
          goto retry;
        }

        // Allocation succeeded in already installed page
        addr = prev_addr;

        // Undo new page allocation
        ZHeap::heap()->undo_alloc_page(new_page);
      }
    }
  }

  return addr;
}

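// A large object gets a page of its own, sized up to the next ZGranuleSize
// boundary, so carving out the single object needs no synchronization.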
uintptr_t ZObjectAllocator::alloc_large_object(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java(), "Should be a Java thread");

  uintptr_t addr = 0;

  // Allocate new large page
  const size_t page_size = align_up(size, ZGranuleSize);
  ZPage* const page = alloc_page(ZPageTypeLarge, page_size, flags);
  if (page != NULL) {
    // Allocate the object
    addr = page->alloc_object(size);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_medium_object(size_t size, ZAllocationFlags flags) {
  return alloc_object_in_shared_page(_shared_medium_page.addr(), ZPageTypeMedium, ZPageSizeMedium, size, flags);
}

uintptr_t ZObjectAllocator::alloc_small_object_from_nonworker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_runtime_worker(),
         "Should be a Java, VM or Runtime worker thread");

  // Non-worker small page allocation can never use the reserve
  flags.set_no_reserve();

  return alloc_object_in_shared_page(_shared_small_page.addr(), ZPageTypeSmall, ZPageSizeSmall, size, flags);
}

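// Worker threads allocate from a worker-local small page, so the plain
// (non-atomic) bump allocation is safe. When the page is full it is simply
// replaced; the old page stays live in the heap until it is reclaimed.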
uintptr_t ZObjectAllocator::alloc_small_object_from_worker(size_t size, ZAllocationFlags flags) {
  assert(ZThread::is_worker(), "Should be a worker thread");

  ZPage* page = _worker_small_page.get();
  uintptr_t addr = 0;

  if (page != NULL) {
    addr = page->alloc_object(size);
  }

  if (addr == 0) {
    // Allocate new page
    page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
    if (page != NULL) {
      addr = page->alloc_object(size);
    }
    _worker_small_page.set(page);
  }

  return addr;
}

uintptr_t ZObjectAllocator::alloc_small_object(size_t size, ZAllocationFlags flags) {
  if (flags.worker_thread()) {
    return alloc_small_object_from_worker(size, flags);
  } else {
    return alloc_small_object_from_nonworker(size, flags);
  }
}

uintptr_t ZObjectAllocator::alloc_object(size_t size, ZAllocationFlags flags) {
  if (size <= ZObjectSizeLimitSmall) {
    // Small
    return alloc_small_object(size, flags);
  } else if (size <= ZObjectSizeLimitMedium) {
    // Medium
    return alloc_medium_object(size, flags);
  } else {
    // Large
    return alloc_large_object(size, flags);
  }
}

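// Entry point for normal Java-thread allocation. These allocations may
// block, but never dip into the page allocation reserve, leaving it for
// relocation allocations.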
uintptr_t ZObjectAllocator::alloc_object(size_t size) {
  assert(ZThread::is_java(), "Must be a Java thread");

  ZAllocationFlags flags;
  flags.set_no_reserve();

  return alloc_object(size, flags);
}

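// Entry point for allocations made while relocating objects. These must
// not block, and worker threads are flagged so that small allocations can
// use their worker-local page.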
uintptr_t ZObjectAllocator::alloc_object_for_relocation(size_t size) {
  assert(ZThread::is_java() || ZThread::is_vm() || ZThread::is_worker() || ZThread::is_runtime_worker(),
         "Unknown thread");

  ZAllocationFlags flags;
  flags.set_relocation();
  flags.set_non_blocking();

  if (ZThread::is_worker()) {
    flags.set_worker_thread();
  }

  return alloc_object(size, flags);
}

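// Undo support. A thread that has already allocated and copied an object
// during relocation may end up not needing the copy, typically because
// another thread won the race to relocate the same object; the copy is
// then handed back here. Undo is best-effort: if it fails, the space is
// simply left unused until the page is reclaimed.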
bool ZObjectAllocator::undo_alloc_large_object(ZPage* page) {
  assert(page->type() == ZPageTypeLarge, "Invalid page type");

  // Undo page allocation
  ZHeap::heap()->undo_alloc_page(page);
  return true;
}

bool ZObjectAllocator::undo_alloc_medium_object(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeMedium, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_nonworker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");

  // Try atomic undo on shared page
  return page->undo_alloc_object_atomic(addr, size);
}

bool ZObjectAllocator::undo_alloc_small_object_from_worker(ZPage* page, uintptr_t addr, size_t size) {
  assert(page->type() == ZPageTypeSmall, "Invalid page type");
  assert(page == _worker_small_page.get(), "Invalid page");

  // Non-atomic undo on worker-local page
  const bool success = page->undo_alloc_object(addr, size);
  assert(success, "Should always succeed");
  return success;
}

bool ZObjectAllocator::undo_alloc_small_object(ZPage* page, uintptr_t addr, size_t size) {
  if (ZThread::is_worker()) {
    return undo_alloc_small_object_from_worker(page, addr, size);
  } else {
    return undo_alloc_small_object_from_nonworker(page, addr, size);
  }
}

bool ZObjectAllocator::undo_alloc_object(ZPage* page, uintptr_t addr, size_t size) {
  const uint8_t type = page->type();

  if (type == ZPageTypeSmall) {
    return undo_alloc_small_object(page, addr, size);
  } else if (type == ZPageTypeMedium) {
    return undo_alloc_medium_object(page, addr, size);
  } else {
    return undo_alloc_large_object(page);
  }
}

void ZObjectAllocator::undo_alloc_object_for_relocation(ZPage* page, uintptr_t addr, size_t size) {
  if (undo_alloc_object(page, addr, size)) {
    ZStatInc(ZCounterUndoObjectAllocationSucceeded);
  } else {
    ZStatInc(ZCounterUndoObjectAllocationFailed);
    log_trace(gc)("Failed to undo object allocation: " PTR_FORMAT ", Size: " SIZE_FORMAT ", Thread: " PTR_FORMAT " (%s)",
                  addr, size, ZThread::id(), ZThread::name());
  }
}

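// Returns the total number of bytes allocated, summed over all per-CPU
// counters. The result is a snapshot and may be slightly stale while
// allocation is in progress.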
size_t ZObjectAllocator::used() const {
  size_t total_used = 0;

  ZPerCPUConstIterator<size_t> iter(&_used);
  for (const size_t* cpu_used; iter.next(&cpu_used);) {
    total_used += *cpu_used;
  }

  return total_used;
}

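// Reports the number of bytes remaining in the current CPU's shared small
// page, or 0 if no page is installed (used on the Java-thread allocation
// path, e.g. to bound TLAB sizes).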
size_t ZObjectAllocator::remaining() const {
  assert(ZThread::is_java(), "Should be a Java thread");

  ZPage* page = _shared_small_page.get();
  if (page != NULL) {
    return page->remaining();
  }

  return 0;
}

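// Called at a safepoint to detach all shared and worker-local allocation
// pages and reset the per-CPU used counters. The next allocation on each
// path will install a fresh page; the detached pages remain live in the
// heap until they are reclaimed.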
void ZObjectAllocator::retire_pages() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Reset used
  _used.set_all(0);

  // Reset allocation pages
  _shared_medium_page.set(NULL);
  _shared_small_page.set_all(NULL);
  _worker_small_page.set_all(NULL);
}