/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP

#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/copy.hpp"

// Inline allocation implementations.

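// Completes initialization of a freshly allocated object: the mark word is
// installed first and the klass pointer last, because concurrent collectors
// treat a non-NULL klass field as the signal that the object is parsable.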
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj_ptr) {
  post_allocation_setup_no_klass_install(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
#if ! INCLUDE_ALL_GCS
  obj->set_klass(klass());
#else
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  obj->release_set_klass(klass());
#endif
}

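// Installs only the mark word (the biased-locking prototype header when
// biased locking is enabled and a klass is available); the klass field is
// left untouched, so concurrent collectors do not yet consider the object
// parsable.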
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* obj_ptr) {
  oop obj = (oop)obj_ptr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
}

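// JFR allocation event support. Only two cases are reported: an allocation
// that triggered a fresh TLAB (the object then sits at the TLAB's start),
// and an allocation satisfied outside any TLAB. Ordinary bump-pointer
// allocations inside an existing TLAB are intentionally not reported
// one by one.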
inline void send_jfr_allocation_event(KlassHandle klass, HeapWord* obj, size_t size) {
  Thread* t = Thread::current();
  ThreadLocalAllocBuffer& tlab = t->tlab();
  if (obj == tlab.start()) {
    // Allocated at the start of a freshly acquired TLAB.
    size_t new_tlab_size = tlab.hard_size_bytes();
    AllocTracer::send_allocation_in_new_tlab_event(klass, obj, new_tlab_size, size * HeapWordSize, t);
  } else if (!tlab.in_used(obj)) {
    // Allocated outside any TLAB.
    AllocTracer::send_allocation_outside_tlab_event(klass, obj, size * HeapWordSize, t);
  }
}

// Support for jvmti, dtrace and jfr
inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
  send_jfr_allocation_event(klass, (HeapWord*)obj, size);
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    if (klass() != NULL && klass()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj, size);
    }
  }
}

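// Post-allocation setup for plain (non-array) objects; arrays go through
// post_allocation_setup_array() below so that their length can be stamped
// before the klass is installed.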
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj_ptr,
                                              int size) {
  post_allocation_setup_common(klass, obj_ptr);
  oop obj = (oop)obj_ptr;
  assert(Universe::is_bootstrapping() ||
         !obj->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, obj, size);
}

void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj_ptr,
                                                int length) {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj_ptr)->set_length(length);
  post_allocation_setup_common(klass, obj_ptr);
  oop new_obj = (oop)obj_ptr;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}

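// The common allocation path: try the thread-local allocation buffer first,
// then fall back to the heap's shared mem_allocate(). On failure this throws
// OutOfMemoryError, distinguishing plain heap exhaustion from the GC
// overhead limit. The returned storage is NOT zeroed ("noinit"); callers
// must initialize it before the object becomes visible.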
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation. Memory allocation might
  // not take out a lock when satisfied from a TLAB, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    return result;
  }

  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

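// As common_mem_allocate_noinit(), but additionally zeroes the object body
// via init_obj().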
HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

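// Fast-path allocation: a simple bump of the thread's TLAB top pointer. If
// the current TLAB cannot satisfy the request, the slow path may retire it
// and acquire a new TLAB, or return NULL so the caller falls back to the
// shared heap.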
HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(klass, thread, size);
}

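// Zeroes the body of a freshly allocated object: the klass gap (the 32-bit
// hole after the mark word used with compressed class pointers) is cleared
// explicitly, and every word past the header is filled with zeros.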
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

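// Allocates and fully initializes a plain (non-array) instance. A minimal
// sketch of a typical caller, modeled on InstanceKlass::allocate_instance
// (names abbreviated, finalizer registration elided):
//
//   int size = ik->size_helper();      // instance size in HeapWords
//   KlassHandle h_k(THREAD, ik);
//   oop obj = CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
//
// On return the object is zeroed, its mark word and klass are set, and the
// allocation has been reported to JVMTI/JFR/DTrace as appropriate.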
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

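// Array counterpart of obj_allocate(): the same flow, except the array
// length is stamped into the header before the klass is installed.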
oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

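// Like array_allocate(), but the element words are left un-zeroed; callers
// must overwrite the whole body before publishing the array. Only the klass
// gap is cleared here. The non-product check below starts one word past the
// header, presumably to skip the array length word, and verifies the body
// still holds the uninitialized-memory zap pattern.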
oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  const size_t hs = oopDesc::header_size() + 1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj + hs, size - hs);
#endif
  return (oop)obj;
}

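// Adapts a plain OopClosure for heap iteration without visiting object
// headers: the wrapper suppresses the metadata (klass) visits that an
// ExtendedOopClosure would otherwise perform.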
inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

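// Aligns a candidate allocation address up to alignment_in_bytes, plugging
// the skipped gap with a filler object so the heap stays walkable, or
// returns NULL if the aligned address would reach or pass 'end'. When the
// gap is too small to hold a minimal filler object, a whole extra alignment
// stride is added.
//
// A worked example under assumed values (8-byte HeapWords, min_fill_size()
// of 2 words, hypothetical addr 0x1008, 64-byte alignment): new_addr becomes
// 0x1040 and padding is 7 words; 7 >= 2, so a 7-word filler is written at
// 0x1008 and 0x1040 is returned.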
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
  if (alignment_in_bytes <= ObjectAlignmentInBytes) {
    return addr;
  }

  assert(is_ptr_aligned(addr, HeapWordSize),
         err_msg("Address " PTR_FORMAT " is not properly aligned.", p2i(addr)));
  assert(is_size_aligned(alignment_in_bytes, HeapWordSize),
         err_msg("Alignment size %u is incorrect.", alignment_in_bytes));

  HeapWord* new_addr = (HeapWord*) align_pointer_up(addr, alignment_in_bytes);
  size_t padding = pointer_delta(new_addr, addr);

  if (padding == 0) {
    return addr;
  }

  if (padding < CollectedHeap::min_fill_size()) {
    padding += alignment_in_bytes / HeapWordSize;
    assert(padding >= CollectedHeap::min_fill_size(),
           err_msg("alignment_in_bytes %u is expected to be larger "
                   "than the minimum object size", alignment_in_bytes));
    new_addr = addr + padding;
  }

  assert(new_addr > addr, err_msg("Unexpected arithmetic overflow "
         PTR_FORMAT " not greater than " PTR_FORMAT, p2i(new_addr), p2i(addr)));
  if (new_addr < end) {
    CollectedHeap::fill_with_object(addr, padding);
    return new_addr;
  } else {
    return NULL;
  }
}

#ifndef PRODUCT

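// Fault injection for testing promotion-failure handling (non-product builds
// only): with -XX:+PromotionFailureALot, once PromotionFailureALotInterval
// collections have elapsed, every PromotionFailureALotCount-th promotion
// attempt is made to fail artificially.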
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif // #ifndef PRODUCT

#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP