1 /*
2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_OOP_INLINE_HPP
26 #define SHARE_OOPS_OOP_INLINE_HPP
27
28 #include "gc/shared/collectedHeap.hpp"
29 #include "memory/universe.hpp"
30 #include "oops/access.inline.hpp"
31 #include "oops/arrayKlass.hpp"
32 #include "oops/arrayOop.hpp"
33 #include "oops/compressedOops.inline.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/markWord.inline.hpp"
36 #include "oops/oop.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/os.hpp"
39 #include "utilities/align.hpp"
40 #include "utilities/macros.hpp"
41
42 // Implementation of all inlined member functions defined in oop.hpp
43 // We need a separate file to avoid circular references
44
// Accessors for the object's mark word.
//
// The plain variants go through the Access API (HeapAccess) so GC barriers
// can intercept the load/store; the *_raw variants bypass the Access API and
// touch _mark directly via Atomic.

markWord oopDesc::mark() const {
  uintptr_t v = HeapAccess<MO_RELAXED>::load_at(as_oop(), mark_offset_in_bytes());
  return markWord(v);
}

markWord oopDesc::mark_raw() const {
  return Atomic::load(&_mark);
}

markWord* oopDesc::mark_addr_raw() const {
  // Casts away const; callers use this to update the mark in place.
  return (markWord*) &_mark;
}

void oopDesc::set_mark(markWord m) {
  HeapAccess<MO_RELAXED>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

void oopDesc::set_mark_raw(markWord m) {
  Atomic::store(&_mark, m);
}

// Variant taking raw memory: 'mem' is storage that is becoming an object,
// so no Access API / barriers are involved.
void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
  *(markWord*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

// Release-ordered store: publishes prior initializing stores before the new
// mark becomes visible to other threads.
void oopDesc::release_set_mark(markWord m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m.value());
}

// Atomically install new_mark if the current mark equals old_mark.
// Returns the mark observed before the exchange (== old_mark on success).
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
  uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(as_oop(), mark_offset_in_bytes(), old_mark.value(), new_mark.value());
  return markWord(v);
}

// Raw CAS on _mark with caller-chosen memory ordering.
markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
}

// Reset the mark to the prototype value for this object's klass
// (see markWord::prototype_for_klass).
void oopDesc::init_mark() {
  set_mark(markWord::prototype_for_klass(klass()));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markWord::prototype_for_klass(klass()));
}
90
klass() const91 Klass* oopDesc::klass() const {
92 if (UseCompressedClassPointers) {
93 return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
94 } else {
95 return _metadata._klass;
96 }
97 }
98
// Like klass(), but uses the plain decode, which (judging by the naming
// contrast with decode_not_null) tolerates a not-yet-set class pointer;
// may return NULL for an object that is still being initialized.
Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

// Acquire-ordered variant of klass_or_null(): pairs with the release store
// in release_set_klass(), so a non-NULL result implies the object's earlier
// initializing stores are visible to this thread.
Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return CompressedKlassPointers::decode(Atomic::load_acquire(xaddr));
  } else {
    return Atomic::load_acquire(&_metadata._klass);
  }
}
117
// Address of the (uncompressed) klass field within raw memory 'mem'.
Klass** oopDesc::klass_addr(HeapWord* mem) {
  // Only used internally and with CMS and will not work with
  // UseCompressedOops
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
  return (Klass**) (((char*)mem) + in_bytes(offset));
}

// Address of the compressed klass field within raw memory 'mem'.
narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
  return (narrowKlass*) (((char*)mem) + in_bytes(offset));
}

// Convenience overloads operating on this object itself.
Klass** oopDesc::klass_addr() {
  return klass_addr((HeapWord*)this);
}

narrowKlass* oopDesc::compressed_klass_addr() {
  return compressed_klass_addr((HeapWord*)this);
}
139
// Sanity check for klass stores: outside of bootstrapping the value must be
// a real, non-NULL Klass.
#define CHECK_SET_KLASS(k)                                \
  do {                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass"); \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

// Plain (non-ordered) klass store on this object.
void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

// Release-ordered klass store into raw memory 'mem'; publishes the object's
// initializing stores before the klass becomes visible (pairs with
// klass_or_null_acquire()).
void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
  CHECK_SET_KLASS(klass);
  if (UseCompressedClassPointers) {
    Atomic::release_store(compressed_klass_addr(mem),
                          CompressedKlassPointers::encode_not_null(klass));
  } else {
    Atomic::release_store(klass_addr(mem), klass);
  }
}

#undef CHECK_SET_KLASS
166
klass_gap() const167 int oopDesc::klass_gap() const {
168 return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
169 }
170
set_klass_gap(HeapWord * mem,int v)171 void oopDesc::set_klass_gap(HeapWord* mem, int v) {
172 if (UseCompressedClassPointers) {
173 *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
174 }
175 }
176
set_klass_gap(int v)177 void oopDesc::set_klass_gap(int v) {
178 set_klass_gap((HeapWord*)this, v);
179 }
180
is_a(Klass * k) const181 bool oopDesc::is_a(Klass* k) const {
182 return klass()->is_subtype_of(k);
183 }
184
size()185 int oopDesc::size() {
186 return size_given_klass(klass());
187 }
188
// Compute this object's size in HeapWords, given its (already loaded) klass.
// Hot path for GC: decodes the klass's layout helper without a virtual call
// whenever possible, falling back to klass->oop_size(this) otherwise.
int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    // Instance: size is encoded directly in the layout helper (in bytes),
    // unless the klass flags the slow path (e.g. size known only per-object).
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() && is_objArray() && is_forwarded() && (UseParallelGC || UseG1GC)),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}
244
// Klass-kind predicates, all routed through klass().
bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

// Raw address of the field at byte offset 'offset' (no Access resolution).
void*     oopDesc::field_addr_raw(int offset)     const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
// As above, but resolves the oop through the Access API first (so a GC that
// relocates objects can hand back the current address).
void*     oopDesc::field_addr(int offset)         const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

// Typed raw field address.
template <class T>
T*        oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

// Byte distance from the start of this object to interior pointer 'p'.
template <typename T>
size_t    oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }
258
259 template <DecoratorSet decorators>
obj_field_access(int offset) const260 inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
obj_field(int offset) const261 inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }
262
obj_field_put(int offset,oop value)263 inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }
264
byte_field(int offset) const265 inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
byte_field_put(int offset,jbyte value)266 inline void oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }
267
char_field(int offset) const268 inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
char_field_put(int offset,jchar value)269 inline void oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }
270
bool_field(int offset) const271 inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
bool_field_put(int offset,jboolean value)272 inline void oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }
bool_field_volatile(int offset) const273 inline jboolean oopDesc::bool_field_volatile(int offset) const { return HeapAccess<MO_SEQ_CST>::load_at(as_oop(), offset); }
bool_field_put_volatile(int offset,jboolean value)274 inline void oopDesc::bool_field_put_volatile(int offset, jboolean value) { HeapAccess<MO_SEQ_CST>::store_at(as_oop(), offset, jboolean(value & 1)); }
short_field(int offset) const275 inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
short_field_put(int offset,jshort value)276 inline void oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }
277
int_field(int offset) const278 inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
int_field_raw(int offset) const279 inline jint oopDesc::int_field_raw(int offset) const { return RawAccess<>::load_at(as_oop(), offset); }
int_field_put(int offset,jint value)280 inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }
281
long_field(int offset) const282 inline jlong oopDesc::long_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
long_field_put(int offset,jlong value)283 inline void oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }
284
float_field(int offset) const285 inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
float_field_put(int offset,jfloat value)286 inline void oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }
287
double_field(int offset) const288 inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
double_field_put(int offset,jdouble value)289 inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
290
// Mark-word predicates. The non-raw forms read the mark through the Access
// API (mark()), the raw forms read _mark directly (mark_raw()).

bool oopDesc::is_locked() const {
  return mark().is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark().is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark().has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw().has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark_raw().is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw().is_marked();
}
318
319 // Used by scavengers
forward_to(oop p)320 void oopDesc::forward_to(oop p) {
321 verify_forwardee(p);
322 markWord m = markWord::encode_pointer_as_mark(p);
323 assert(m.decode_pointer() == p, "encoding must be reversable");
324 set_mark_raw(m);
325 }
326
327 // Used by parallel scavengers
cas_forward_to(oop p,markWord compare,atomic_memory_order order)328 bool oopDesc::cas_forward_to(oop p, markWord compare, atomic_memory_order order) {
329 verify_forwardee(p);
330 markWord m = markWord::encode_pointer_as_mark(p);
331 assert(m.decode_pointer() == p, "encoding must be reversable");
332 return cas_set_mark_raw(m, compare, order) == compare;
333 }
334
forward_to_atomic(oop p,markWord compare,atomic_memory_order order)335 oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
336 verify_forwardee(p);
337 markWord m = markWord::encode_pointer_as_mark(p);
338 assert(m.decode_pointer() == p, "encoding must be reversable");
339 markWord old_mark = cas_set_mark_raw(m, compare, order);
340 if (old_mark == compare) {
341 return NULL;
342 } else {
343 return (oop)old_mark.decode_pointer();
344 }
345 }
346
347 // Note that the forwardee is not the same thing as the displaced_mark.
348 // The forwardee is used when copying during scavenge and mark-sweep.
349 // It does need to clear the low two locking- and GC-related bits.
forwardee() const350 oop oopDesc::forwardee() const {
351 return (oop) mark_raw().decode_pointer();
352 }
353
354 // Note that the forwardee is not the same thing as the displaced_mark.
355 // The forwardee is used when copying during scavenge and mark-sweep.
356 // It does need to clear the low two locking- and GC-related bits.
forwardee_acquire() const357 oop oopDesc::forwardee_acquire() const {
358 return (oop) Atomic::load_acquire(&_mark).decode_pointer();
359 }
360
361 // The following method needs to be MT safe.
age() const362 uint oopDesc::age() const {
363 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
364 if (has_displaced_mark_raw()) {
365 return displaced_mark_raw().age();
366 } else {
367 return mark_raw().age();
368 }
369 }
370
incr_age()371 void oopDesc::incr_age() {
372 assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
373 if (has_displaced_mark_raw()) {
374 set_displaced_mark_raw(displaced_mark_raw().incr_age());
375 } else {
376 set_mark_raw(mark_raw().incr_age());
377 }
378 }
379
380 template <typename OopClosureType>
oop_iterate(OopClosureType * cl)381 void oopDesc::oop_iterate(OopClosureType* cl) {
382 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
383 }
384
385 template <typename OopClosureType>
oop_iterate(OopClosureType * cl,MemRegion mr)386 void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
387 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
388 }
389
390 template <typename OopClosureType>
oop_iterate_size(OopClosureType * cl)391 int oopDesc::oop_iterate_size(OopClosureType* cl) {
392 Klass* k = klass();
393 int size = size_given_klass(k);
394 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
395 return size;
396 }
397
398 template <typename OopClosureType>
oop_iterate_size(OopClosureType * cl,MemRegion mr)399 int oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
400 Klass* k = klass();
401 int size = size_given_klass(k);
402 OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
403 return size;
404 }
405
406 template <typename OopClosureType>
oop_iterate_backwards(OopClosureType * cl)407 void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
408 OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, klass());
409 }
410
is_instanceof_or_null(oop obj,Klass * klass)411 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
412 return obj == NULL || obj->klass()->is_subtype_of(klass);
413 }
414
identity_hash()415 intptr_t oopDesc::identity_hash() {
416 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
417 // Note: The mark must be read into local variable to avoid concurrent updates.
418 markWord mrk = mark();
419 if (mrk.is_unlocked() && !mrk.has_no_hash()) {
420 return mrk.hash();
421 } else if (mrk.is_marked()) {
422 return mrk.hash();
423 } else {
424 return slow_identity_hash();
425 }
426 }
427
// Helpers for marks whose real contents live elsewhere (e.g. a displaced
// header for a locked object); the *_helper calls on markWord follow that
// indirection.

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw().has_displaced_mark_helper();
}

markWord oopDesc::displaced_mark_raw() const {
  return mark_raw().displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markWord m) {
  // mark_raw() returns the mark by value; set_displaced_mark_helper
  // presumably writes through the pointer embedded in that mark — the
  // temporary itself is not the store target.
  mark_raw().set_displaced_mark_helper(m);
}
439
// Supports deferred calling of obj->klass().
// Converts to const Klass* on demand, so the (potentially unneeded) klass
// load happens only if markWord::must_be_preserved actually asks for it.
class DeferredObjectToKlass {
  const oopDesc* _obj;

 public:
  DeferredObjectToKlass(const oopDesc* obj) : _obj(obj) {}

  // Implicitly convertible to const Klass*.
  operator const Klass*() const {
    return _obj->klass();
  }
};

// True if this object's current mark must be saved before the GC overwrites
// the mark word (e.g. with a forwarding pointer).
bool oopDesc::mark_must_be_preserved() const {
  return mark_must_be_preserved(mark_raw());
}

bool oopDesc::mark_must_be_preserved(markWord m) const {
  // There's a circular dependency between oop.inline.hpp and
  // markWord.inline.hpp because markWord::must_be_preserved wants to call
  // oopDesc::klass(). This could be solved by calling klass() here. However,
  // not all paths inside must_be_preserved calls klass(). Defer the call until
  // the klass is actually needed.
  return m.must_be_preserved(DeferredObjectToKlass(this));
}

// Same question, specialized for the promotion-failure handling path.
bool oopDesc::mark_must_be_preserved_for_promotion_failure(markWord m) const {
  return m.must_be_preserved_for_promotion_failure(DeferredObjectToKlass(this));
}
469
470 #endif // SHARE_OOPS_OOP_INLINE_HPP
471