/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",    "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",            "archivedData"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

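// Roots of the archived heap:
// - _pending_roots collects root objects while dumping; copy_roots() materializes
//   them into an objArray in the open archive heap region.
// - _roots_narrow records where that objArray lives in the archive; at runtime,
//   fixup_mapped_heap_regions() resolves it into the _roots OopHandle.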
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
narrowOop HeapShared::_roots_narrow;
OopHandle HeapShared::_roots;

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  if (is_mapped()) {
    _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
    if (!MetaspaceShared::use_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

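// Hash function for dump-time tables that are keyed by (original) heap objects.
// identity_hash() is safe to call here because the objects passed in are never
// locked (see the assert below), so computing the hash cannot trigger a safepoint.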
unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
141 "this object should never have been locked"); // so identity_hash won't safepoin
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

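// Records obj as a root of the archived heap and returns its index in _pending_roots.
// The index is what gets stored in the subgraph entry-field records (see
// KlassSubGraphInfo::add_subgraph_entry_field).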
int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!is_heap_object_archiving_allowed()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

void HeapShared::set_roots(narrowOop roots) {
  assert(UseSharedSpaces, "runtime only");
  assert(open_archive_heap_region_mapped(), "must be");
  _roots_narrow = roots;
}

// Returns the root object at the given index of the archived roots array,
// optionally clearing the slot (runtime only) after reading it.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (open_archive_heap_region_mapped()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}

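// Copies obj into the G1 archive region and returns the archived copy, keeping the
// identity hash and recording the original->archived mapping in the object cache.
// Returns NULL only if the object is too large for archive allocation; a failed
// allocation terminates the dump.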
oop HeapShared::archive_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_direct_exit(-1,
      err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
              SIZE_FORMAT "M", MaxHeapSize/M));
  }
  return archived_oop;
}

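// Archive the java mirror of every klass collected by the ArchiveBuilder, and the
// resolved_references array of every instance klass.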
void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion>* closed,
                                           GrowableArray<MemRegion>* open) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_archive_heap_objects(closed);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             num_fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    ClassLoaderDataShared::init_archived_oops();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray allocated in the open archive region
void HeapShared::copy_roots() {
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  int size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    if (UseBiasedLocking) {
      oopDesc::set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
      _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

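// When VerifyArchivedFields is set, run heap verification around the restoration of
// archived static fields; with a non-default flag value, also force a fully verified
// collection once VM initialization has completed.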
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (!FLAG_IS_DEFAULT(VerifyArchivedFields)) {
      // If VerifyArchivedFields has a non-default value (e.g., specified on the command-line), do
      // more expensive checks.
      if (is_init_completed()) {
        FlagSetting fs1(VerifyBeforeGC, true);
        FlagSetting fs2(VerifyDuringGC, true);
        FlagSetting fs3(VerifyAfterGC,  true);
        Universe::heap()->collect(GCCause::_java_lang_system_gc);
      }
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
                                num_closed_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
                                num_open_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
                                num_fmg_open_archive_subgraph_entry_fields,
                                THREAD);
}

void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
                                               int num, JavaThread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(k, THREAD);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != NULL) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

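// Closure used during dumping: for each reference field of the original object it
// recursively archives the referent and, unless we are only recording klasses,
// updates the corresponding field in the archived copy to point to the archived
// referent.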
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};

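// Objects in the closed archive heap region must not have reference fields that are
// modified at runtime, so warn about any non-static, non-final reference field of
// the given class.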
void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}


// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_direct_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_direct_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_direct_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k));
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

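// Debug-only support for verifying that every object reachable from an archived
// subgraph root has itself been archived.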
#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

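// The seen-objects table prevents objects from being walked more than once while a
// subgraph is being recorded, and feeds the per-subgraph statistics logged by
// done_recording_subgraph().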
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs += _num_new_walked_objs;
  _num_total_archived_objs += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

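// FieldClosure that locates the named static reference field in a class and records
// its offset, for filling in ArchivableStaticFieldInfo.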
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

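// Resolve and initialize the class named in each entry of fields[], then record the
// klass and the offset of the named static field so the subgraph walk can start from it.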
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, TRAPS) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(is_heap_object_archiving_allowed(), "Sanity check");
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               CHECK);
  }
}

void HeapShared::init_for_dumping(TRAPS) {
  if (is_heap_object_archiving_allowed()) {
    _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be only referenced by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
//   [1] used by classes that WILL be archived;
//   [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  narrowOop* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - _start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop *p) {
    ShouldNotReachHere();
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

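// Returns a bitmap with one bit per narrowOop slot in the region, set for every
// non-null embedded oop. At dump time this pass also relocates each object's klass
// pointer via the ArchiveBuilder.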
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  assert(UseCompressedOops, "must be");
  size_t num_bits = region.byte_size() / sizeof(narrowOop);
  ResourceBitMap oopmap(num_bits);

  HeapWord* p = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
  ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;

  int num_objs = 0;
  while (p < end) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&finder);
    p += o->size();
    if (DumpSharedSpaces) {
      builder->relocate_klass_ptr(o);
    }
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP