/*
 * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classListWriter.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "interpreter/bootstrapInfo.hpp"
#include "memory/archiveUtils.hpp"
#include "memory/dynamicArchive.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "utilities/bitMap.inline.hpp"

CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
address* ArchivePtrMarker::_ptr_base;
address* ArchivePtrMarker::_ptr_end;
bool ArchivePtrMarker::_compacted;

void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
  assert(_ptrmap == NULL, "initialize only once");
  _ptr_base = ptr_base;
  _ptr_end = ptr_end;
  _compacted = false;
  _ptrmap = ptrmap;

  // Use this as the initial guesstimate. The archive should need less space than
  // this, but if the guess is too small the bitmap will be expanded automatically.
  size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
  // But set it smaller in debug builds so we always test the expansion code.
  // (The default archive is about 12MB.)
  DEBUG_ONLY(estimated_archive_size = 6 * M);

  // We need one bit per pointer-sized word in the archive.
  _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
}
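
// Sizing sketch (numbers follow from the comments above; 64-bit words assumed):
// one bit per word means the default ~12MB archive needs 12M / 8 = ~1.5M bits,
// i.e. about 192KB of bitmap. The 6MB debug-build estimate deliberately
// under-covers the default archive so that the resize() path in mark_pointer()
// gets exercised regularly.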

void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
  assert(_ptrmap != NULL, "not initialized");
  assert(!_compacted, "cannot mark anymore");

  if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) {
    address value = *ptr_loc;
    // We don't want any pointer that points to the very bottom of the archive;
    // otherwise, when MetaspaceShared::default_base_address()==0, we can't
    // distinguish between a pointer to nothing (NULL) and a pointer to an object
    // that happens to be at the very bottom of the archive.
    assert(value != (address)_ptr_base, "don't point to the bottom of the archive");

    if (value != NULL) {
      assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored at aligned addresses");
      size_t idx = ptr_loc - _ptr_base;
      if (_ptrmap->size() <= idx) {
        _ptrmap->resize((idx + 1) * 2);
      }
      assert(idx < _ptrmap->size(), "must be");
      _ptrmap->set_bit(idx);
      //tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
    }
  }
}
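
// Marking example (addresses are hypothetical): with _ptr_base == (address*)0x800000000,
// a non-NULL pointer stored at 0x800000010 sits two words above the base, so
// bit 2 is set. The relocation code later walks these bits to find exactly
// which words in the archive need patching.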

void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
  assert(_ptrmap != NULL, "not initialized");
  assert(!_compacted, "cannot clear anymore");

  assert(_ptr_base <= ptr_loc && ptr_loc < _ptr_end, "must be");
  assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored at aligned addresses");
  size_t idx = ptr_loc - _ptr_base;
  assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
  _ptrmap->clear_bit(idx);
  //tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
}

class ArchivePtrBitmapCleaner: public BitMapClosure {
  CHeapBitMap* _ptrmap;
  address* _ptr_base;
  address  _relocatable_base;
  address  _relocatable_end;
  size_t   _max_non_null_offset;

public:
  ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
    _ptrmap(ptrmap), _ptr_base(ptr_base),
    _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}

  bool do_bit(size_t offset) {
    address* ptr_loc = _ptr_base + offset;
    address  ptr_value = *ptr_loc;
    if (ptr_value != NULL) {
      assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
      if (_max_non_null_offset < offset) {
        _max_non_null_offset = offset;
      }
    } else {
      _ptrmap->clear_bit(offset);
      DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT  "] -> NULL @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset));
    }

    return true;
  }

  size_t max_non_null_offset() const { return _max_non_null_offset; }
};

void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
  assert(!_compacted, "cannot compact again");
  ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end);
  _ptrmap->iterate(&cleaner);
  compact(cleaner.max_non_null_offset());
}

void ArchivePtrMarker::compact(size_t max_non_null_offset) {
  assert(!_compacted, "cannot compact again");
  _ptrmap->resize(max_non_null_offset + 1);
  _compacted = true;
}
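
// Dump-time usage, as a sketch (the real call sites live in the CDS dump code;
// the field name below is hypothetical):
//
//   ArchivePtrMarker::mark_pointer((address*)&copied->_some_metadata);  // while copying
//   ArchivePtrMarker::compact(relocatable_base, relocatable_end);       // when done
//
// After compact(), the bitmap ends at the last non-NULL marked pointer, so only
// the prefix that actually contains relocations is written into the archive.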

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real-world usage.
      // It happens only if you allocate more than 2GB of shared objects, which
      // would require millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}
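
// Back-of-the-envelope for the MAX_SHARED_DELTA check above: reaching a ~2GB
// delta would take on the order of 2G / 1K = ~2 million shared classes,
// assuming very roughly 1KB of metadata per class (an illustrative figure,
// not a measured one) -- hence "millions of shared classes" in the comment.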

char* DumpRegion::allocate(size_t num_bytes) {
  char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment);
  char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
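
// Worked example, assuming SharedSpaceObjectAlignment == 8: allocate(13) first
// aligns _top up to an 8-byte boundary, then reserves align_up(13, 8) == 16
// bytes, zeroes them, and returns the aligned start -- so every block handed
// out is both aligned and zero-filled.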

void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}
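
// Two kinds of words go through append_intptr_t() (values below are hypothetical):
//
//   region->append_intptr_t(0x12345678);                // plain data: no relocation bit
//   region->append_intptr_t((intptr_t)some_ptr, true);  // archive pointer: marked for relocation
//
// Passing need_to_mark = true records the word's position in ArchivePtrMarker's
// bitmap so the runtime can patch it if the archive maps at a different address.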

void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}
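
// Chaining sketch (region names are illustrative of how the CDS dump code uses
// this): packing a region trims it to its aligned top and hands the remainder
// of the reserved space to the next region, so the regions end up contiguous:
//
//   first_region.pack(&second_region);   // first ends (aligned); second starts there
//   second_region.pack(&third_region);   // and so on, until the last region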

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap objects is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing a previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

void ReadClosure::do_oop(oop *p) {
  narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
  if (CompressedOops::is_null(o) || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap objects are not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
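
// Stream layout sketch for a 2-word region (values are hypothetical):
//
//   [ tag = 16 ][ word 0 ][ word 1 ]
//
// WriteClosure::do_region() above emits the byte size as a tag followed by the
// raw words; do_tag(16) here re-checks that size before the words are copied
// back, so any dump/runtime layout drift fails fast instead of silently
// misreading the archive.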

fileStream* ClassListWriter::_classlist_file = NULL;

void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
  if (ClassListWriter::is_enabled()) {
    if (SystemDictionaryShared::is_supported_invokedynamic(bootstrap_specifier)) {
      ResourceMark rm(THREAD);
      const constantPoolHandle& pool = bootstrap_specifier->pool();
      int pool_index = bootstrap_specifier->bss_index();
      ClassListWriter w;
      w.stream()->print("%s %s", LAMBDA_PROXY_TAG, pool->pool_holder()->name()->as_C_string());
      CDSIndyInfo cii;
      ClassListParser::populate_cds_indy_info(pool, pool_index, &cii, THREAD);
      GrowableArray<const char*>* indy_items = cii.items();
      for (int i = 0; i < indy_items->length(); i++) {
        w.stream()->print(" %s", indy_items->at(i));
      }
      w.stream()->cr();
    }
  }
}
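
// Resulting classlist line, as a sketch (the leading tag comes from
// LAMBDA_PROXY_TAG and the trailing fields from the indy items; the class name
// and items shown are illustrative):
//
//   @lambda-proxy pkg/SomeHolder <indy item 1> <indy item 2> ...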

void ArchiveUtils::check_for_oom(oop exception) {
  assert(exception != nullptr, "Sanity check");
  if (exception->is_a(SystemDictionary::OutOfMemoryError_klass())) {
    vm_direct_exit(-1,
      err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
              SIZE_FORMAT "M", MaxHeapSize/M));
  }
}