1 /*
2 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/nmethod.hpp"
29 #include "code/scopeDesc.hpp"
30 #include "compiler/oopMap.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "memory/allocation.inline.hpp"
33 #include "memory/iterator.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "runtime/frame.inline.hpp"
36 #include "runtime/handles.inline.hpp"
37 #include "runtime/signature.hpp"
38 #include "utilities/align.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_Defs.hpp"
41 #endif
42 #ifdef COMPILER2
43 #include "opto/optoreg.hpp"
44 #endif
45
46 // OopMapStream
47
OopMapStream(OopMap * oop_map)48 OopMapStream::OopMapStream(OopMap* oop_map) {
49 _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
50 _size = oop_map->omv_count();
51 _position = 0;
52 _valid_omv = false;
53 }
54
OopMapStream(const ImmutableOopMap * oop_map)55 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) {
56 _stream = new CompressedReadStream(oop_map->data_addr());
57 _size = oop_map->count();
58 _position = 0;
59 _valid_omv = false;
60 }
61
find_next()62 void OopMapStream::find_next() {
63 if (_position++ < _size) {
64 _omv.read_from(_stream);
65 _valid_omv = true;
66 return;
67 }
68 _valid_omv = false;
69 }
70
71
72 // OopMap
73
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite so small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  // Debug-only shadow array with one slot per nameable location (registers
  // plus frame plus outgoing args); set_xxx() uses it to catch duplicate
  // insertions for the same location.
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}
87
88
OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap: a fresh compressed stream is allocated and every
  // entry from 'source' is re-encoded into it.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  // Fresh (all-unused) shadow array; the copied entries below are written
  // directly to the stream and are not re-registered here.
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}
109
110
// Clone this map, including a private copy of the compressed entry stream.
OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

// Copy the encoded entry bytes (only the bytes written so far, not the
// stream's full capacity) to 'addr'.
void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}
118
heap_size() const119 int OopMap::heap_size() const {
120 int size = sizeof(OopMap);
121 int align = sizeof(void *) - 1;
122 size += write_stream()->position();
123 // Align to a reasonable ending point
124 size = ((size+align) & ~align);
125 return size;
126 }
127
// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
// Record that location 'reg' holds a value of kind 'x' at this map's pc;
// 'optional' carries the companion register for callee-saved and derived
// entries (VMRegImpl::Bad() otherwise).
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
}
140
141
// 'reg' holds an ordinary (uncompressed) oop.
void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


// 'reg' holds a compressed (narrow) oop.
void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


// 'reg' is where the callee saved the caller's 'caller_machine_register'.
void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


// 'reg' holds an interior pointer derived from the base oop living in
// 'derived_from_local_register'.
void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}
165
166 // OopMapSet
167
OopMapSet()168 OopMapSet::OopMapSet() {
169 set_om_size(MinOopMapAllocation);
170 set_om_count(0);
171 OopMap** temp = NEW_RESOURCE_ARRAY(OopMap*, om_size());
172 set_om_data(temp);
173 }
174
175
grow_om_data()176 void OopMapSet::grow_om_data() {
177 int new_size = om_size() * 2;
178 OopMap** new_data = NEW_RESOURCE_ARRAY(OopMap*, new_size);
179 memcpy(new_data,om_data(),om_size() * sizeof(OopMap*));
180 set_om_size(new_size);
181 set_om_data(new_data);
182 }
183
// Append 'map' as the oop map for the safepoint at 'pc_offset'.  Maps are
// expected to arrive in increasing pc order (checked in debug builds).
void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  assert(om_size() != -1,"Cannot grow a fixed OopMapSet");

  if(om_count() >= om_size()) {
    grow_om_data();
  }
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(om_count() > 0) {
    OopMap* last = at(om_count()-1);
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if(last->offset() > map->offset()) {
      // NOTE(review): the printed indices look off by one (om_count() is
      // the index of the new entry, om_count()-1 the previous) — debug
      // output only, does not affect behavior.
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      om_count(),last->offset(),om_count()+1,map->offset());
    }
  }
#endif // ASSERT

  set(om_count(),map);
  increment_count();
}
208
209
heap_size() const210 int OopMapSet::heap_size() const {
211 // The space we use
212 int size = sizeof(OopMap);
213 int align = sizeof(void *) - 1;
214 size = ((size+align) & ~align);
215 size += om_count() * sizeof(OopMap*);
216
217 // Now add in the space needed for the indivdiual OopMaps
218 for(int i=0; i < om_count(); i++) {
219 size += at(i)->heap_size();
220 }
221 // We don't need to align this, it will be naturally pointer aligned
222 return size;
223 }
224
225
// Convenience accessor for blobs that have exactly one safepoint/GC point.
OopMap* OopMapSet::singular_oop_map() {
  guarantee(om_count() == 1, "Make sure we only have a single gc point");
  return at(0);
}
230
231
find_map_at_offset(int pc_offset) const232 OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
233 int i, len = om_count();
234 assert( len > 0, "must have pointer maps" );
235
236 // Scan through oopmaps. Stop when current offset is either equal or greater
237 // than the one we are looking for.
238 for( i = 0; i < len; i++) {
239 if( at(i)->offset() >= pc_offset )
240 break;
241 }
242
243 assert( i < len, "oopmap not found" );
244
245 OopMap* m = at(i);
246 assert( m->offset() == pc_offset, "oopmap not found" );
247 return m;
248 }
249
// Closure handed to all_do(): queue a (derived, base) pair for later fixup.
// Only C2/JVMCI emit derived pointers, so this must be unreachable in a
// pure-C1 configuration.
static void add_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !INCLUDE_JVMCI
  COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}
258
259
260 #ifndef PRODUCT
trace_codeblob_maps(const frame * fr,const RegisterMap * reg_map)261 static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
262 // Print oopmap and regmap
263 tty->print_cr("------ ");
264 CodeBlob* cb = fr->cb();
265 const ImmutableOopMapSet* maps = cb->oop_maps();
266 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
267 map->print();
268 if( cb->is_nmethod() ) {
269 nmethod* nm = (nmethod*)cb;
270 // native wrappers have no scope data, it is implied
271 if (nm->is_native_method()) {
272 tty->print("bci: 0 (native)");
273 } else {
274 ScopeDesc* scope = nm->scope_desc_at(fr->pc());
275 tty->print("bci: %d ",scope->bci());
276 }
277 }
278 tty->cr();
279 fr->print_on(tty);
280 tty->print(" ");
281 cb->print_value_on(tty); tty->cr();
282 reg_map->print();
283 tty->print_cr("------ ");
284
285 }
286 #endif // PRODUCT
287
// Visit all oops recorded for frame 'fr': plain/narrow oops go to 'f',
// derived pointers are queued in the DerivedPointerTable for post-GC fixup,
// and plain values are ignored.
void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f) {
  // add derived oops to a table
  all_do(fr, reg_map, f, add_derived_oop, &do_nothing_cl);
}
292
293
all_do(const frame * fr,const RegisterMap * reg_map,OopClosure * oop_fn,void derived_oop_fn (oop *,oop *),OopClosure * value_fn)294 void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
295 OopClosure* oop_fn, void derived_oop_fn(oop*, oop*),
296 OopClosure* value_fn) {
297 CodeBlob* cb = fr->cb();
298 assert(cb != NULL, "no codeblob");
299
300 NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)
301
302 const ImmutableOopMapSet* maps = cb->oop_maps();
303 const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
304 assert(map != NULL, "no ptr map found");
305
306 // handle derived pointers first (otherwise base pointer may be
307 // changed before derived pointer offset has been collected)
308 {
309 for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
310 OopMapValue omv = oms.current();
311 if (omv.type() != OopMapValue::derived_oop_value) {
312 continue;
313 }
314
315 #ifndef TIERED
316 COMPILER1_PRESENT(ShouldNotReachHere();)
317 #if INCLUDE_JVMCI
318 if (UseJVMCICompiler) {
319 ShouldNotReachHere();
320 }
321 #endif
322 #endif // !TIERED
323 // Protect the operation on the derived pointers. This
324 // protects the addition of derived pointers to the shared
325 // derived pointer table in DerivedPointerTable::add().
326 MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);
327
328 oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
329 guarantee(loc != NULL, "missing saved register");
330 oop *derived_loc = loc;
331 oop *base_loc = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
332 // Ignore NULL oops and decoded NULL narrow oops which
333 // equal to Universe::narrow_oop_base when a narrow oop
334 // implicit null check is used in compiled code.
335 // The narrow_oop_base could be NULL or be the address
336 // of the page below heap depending on compressed oops mode.
337 if (base_loc != NULL && *base_loc != (oop)NULL && !Universe::is_narrow_oop_base(*base_loc)) {
338 derived_oop_fn(base_loc, derived_loc);
339 }
340 }
341 }
342
343 {
344 // We want coop and oop oop_types
345 for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
346 OopMapValue omv = oms.current();
347 oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
348 // It should be an error if no location can be found for a
349 // register mentioned as contained an oop of some kind. Maybe
350 // this was allowed previously because value_value items might
351 // be missing?
352 guarantee(loc != NULL, "missing saved register");
353 if ( omv.type() == OopMapValue::oop_value ) {
354 oop val = *loc;
355 if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
356 // Ignore NULL oops and decoded NULL narrow oops which
357 // equal to Universe::narrow_oop_base when a narrow oop
358 // implicit null check is used in compiled code.
359 // The narrow_oop_base could be NULL or be the address
360 // of the page below heap depending on compressed oops mode.
361 continue;
362 }
363 #ifdef ASSERT
364 // We can not verify the oop here if we are using ZGC, the oop
365 // will be bad in case we had a safepoint between a load and a
366 // load barrier.
367 if (!UseZGC &&
368 ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
369 !Universe::heap()->is_in_or_null(*loc))) {
370 tty->print_cr("# Found non oop pointer. Dumping state at failure");
371 // try to dump out some helpful debugging information
372 trace_codeblob_maps(fr, reg_map);
373 omv.print();
374 tty->print_cr("register r");
375 omv.reg()->print();
376 tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
377 // do the real assert.
378 assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
379 }
380 #endif // ASSERT
381 oop_fn->do_oop(loc);
382 } else if ( omv.type() == OopMapValue::narrowoop_value ) {
383 narrowOop *nl = (narrowOop*)loc;
384 #ifndef VM_LITTLE_ENDIAN
385 VMReg vmReg = omv.reg();
386 // Don't do this on SPARC float registers as they can be individually addressed
387 if (!vmReg->is_stack() SPARC_ONLY(&& !vmReg->is_FloatRegister())) {
388 // compressed oops in registers only take up 4 bytes of an
389 // 8 byte register but they are in the wrong part of the
390 // word so adjust loc to point at the right place.
391 nl = (narrowOop*)((address)nl + 4);
392 }
393 #endif
394 oop_fn->do_oop(nl);
395 }
396 }
397 }
398 }
399
400
// Update callee-saved register info for the following frame
// Record, for every callee-saved-register entry in this frame's oop map,
// where the caller's register value was saved, so the caller frame can be
// walked.  Debug builds assert the map is only updated once per frame.
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map  = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      oop* loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, (address) loc);
      DEBUG_ONLY(nof_callee++;)
    }
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
442
443 // Printing code is present in product build for -XX:+PrintAssembly.
444
445 static
print_register_type(OopMapValue::oop_types x,VMReg optional,outputStream * st)446 void print_register_type(OopMapValue::oop_types x, VMReg optional,
447 outputStream* st) {
448 switch( x ) {
449 case OopMapValue::oop_value:
450 st->print("Oop");
451 break;
452 case OopMapValue::narrowoop_value:
453 st->print("NarrowOop");
454 break;
455 case OopMapValue::callee_saved_value:
456 st->print("Callers_");
457 optional->print_on(st);
458 break;
459 case OopMapValue::derived_oop_value:
460 st->print("Derived_oop_");
461 optional->print_on(st);
462 break;
463 default:
464 ShouldNotReachHere();
465 }
466 }
467
// Print one entry as "<register>=<type tag>" followed by a space.
void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}
474
print_on(outputStream * st) const475 void ImmutableOopMap::print_on(outputStream* st) const {
476 OopMapValue omv;
477 st->print("ImmutableOopMap{");
478 for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
479 omv = oms.current();
480 omv.print_on(st);
481 }
482 st->print("}");
483 }
484
print_on(outputStream * st) const485 void OopMap::print_on(outputStream* st) const {
486 OopMapValue omv;
487 st->print("OopMap{");
488 for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
489 omv = oms.current();
490 omv.print_on(st);
491 }
492 st->print("off=%d}", (int) offset());
493 }
494
print_on(outputStream * st) const495 void ImmutableOopMapSet::print_on(outputStream* st) const {
496 const ImmutableOopMap* last = NULL;
497 for (int i = 0; i < _count; ++i) {
498 const ImmutableOopMapPair* pair = pair_at(i);
499 const ImmutableOopMap* map = pair->get_from(this);
500 if (map != last) {
501 st->cr();
502 map->print_on(st);
503 st->print("pc offsets: ");
504 }
505 last = map;
506 st->print("%d ", pair->pc_offset());
507 }
508 }
509
print_on(outputStream * st) const510 void OopMapSet::print_on(outputStream* st) const {
511 int i, len = om_count();
512
513 st->print_cr("OopMapSet contains %d OopMaps\n",len);
514
515 for( i = 0; i < len; i++) {
516 OopMap* m = at(i);
517 st->print_cr("#%d ",i);
518 m->print_on(st);
519 st->cr();
520 }
521 }
522
equals(const OopMap * other) const523 bool OopMap::equals(const OopMap* other) const {
524 if (other->_omv_count != _omv_count) {
525 return false;
526 }
527 if (other->write_stream()->position() != write_stream()->position()) {
528 return false;
529 }
530 if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
531 return false;
532 }
533 return true;
534 }
535
find_map_at_offset(int pc_offset) const536 const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
537 ImmutableOopMapPair* pairs = get_pairs();
538
539 int i;
540 for (i = 0; i < _count; ++i) {
541 if (pairs[i].pc_offset() >= pc_offset) {
542 break;
543 }
544 }
545 ImmutableOopMapPair* last = &pairs[i];
546
547 assert(last->pc_offset() == pc_offset, "oopmap not found");
548 return last->get_from(this);
549 }
550
// Resolve this pair's map from its byte offset within 'set'.
const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

// Build an immutable copy of 'oopmap'; the compressed entry bytes are laid
// out immediately after this object (see data_addr()).
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}
559
560 #ifdef ASSERT
nr_of_bytes() const561 int ImmutableOopMap::nr_of_bytes() const {
562 OopMapStream oms(this);
563
564 while (!oms.is_done()) {
565 oms.next();
566 }
567 return sizeof(ImmutableOopMap) + oms.stream_position();
568 }
569 #endif
570
// Plans and performs the conversion of a mutable OopMapSet into its compact
// immutable form.  One Mapping per source map records where (and whether)
// its immutable copy will be placed.
ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _new_set(NULL), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

// Bytes one immutable map occupies: header plus entry bytes, 8-byte aligned.
int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}
578
// Compute the total byte size required for the immutable set, while also
// recording in _mapping[] how each source map will be emitted: shared empty
// map, duplicate of the previous map, or a fresh copy.
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of ours pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        // first empty map becomes the canonical one
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  // Debug builds reserve 8 extra bytes for the 0xff sentinel that verify()
  // checks for buffer overruns.
  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}
621
// Placement-construct the (pc offset, map offset) pair into the pairs table.
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

// Write both the pair and the ImmutableOopMap it refers to; returns the
// number of bytes the map occupies.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}
634
// Materialize every map according to the plan recorded by heap_size():
// NEW maps are copied in full; DUPLICATE/EMPTY maps only get a pair entry
// that points at a previously written map.
void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    // Sanity check: the copy reachable through the set must be
    // byte-identical to the source map's compressed data.
    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}
653
654 #ifdef ASSERT
verify(address buffer,int size,const ImmutableOopMapSet * set)655 void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
656 for (int i = 0; i < 8; ++i) {
657 assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
658 }
659
660 for (int i = 0; i < set->count(); ++i) {
661 const ImmutableOopMapPair* pair = set->pair_at(i);
662 assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
663 const ImmutableOopMap* map = pair->get_from(set);
664 int nr_of_bytes = map->nr_of_bytes();
665 assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
666 }
667 }
668 #endif
669
// Construct the ImmutableOopMapSet in 'buffer' (which must hold _required
// bytes).  Debug builds seed the trailing 8 bytes with 0xff and re-check
// them in verify() to detect overruns.
ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

// Size the conversion, allocate the C-heap chunk, and generate into it.
ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

// Entry point: compress a mutable OopMapSet into its immutable, shareable form.
ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
694
695
696 //------------------------------DerivedPointerTable---------------------------
697
698 #if COMPILER2_OR_JVMCI
699
700 class DerivedPointerEntry : public CHeapObj<mtCompiler> {
701 private:
702 oop* _location; // Location of derived pointer (also pointing to the base)
703 intptr_t _offset; // Offset from base pointer
704 public:
DerivedPointerEntry(oop * location,intptr_t offset)705 DerivedPointerEntry(oop* location, intptr_t offset) { _location = location; _offset = offset; }
location()706 oop* location() { return _location; }
offset()707 intptr_t offset() { return _offset; }
708 };
709
710
GrowableArray<DerivedPointerEntry*>* DerivedPointerTable::_list = NULL;  // lazily created in clear()
bool DerivedPointerTable::_active = false;                               // true between clear() and update_pointers()
713
714
// Prepare the table for a GC cycle: lazily allocate the C-heap list and
// activate recording.  Must be paired with update_pointers() afterwards.
void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(_list == NULL || _list->length() == 0, "table not empty");
  if (_list == NULL) {
    _list = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap
  }
  _active = true;
}
726
727
// Returns value of location as an int
// (reinterprets the oop bits stored at 'pointer' as an integer so that
// derived-pointer offsets can be computed with plain arithmetic).
intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
730
731
// Remember the derived pointer at 'derived_loc': record its offset from the
// base, then temporarily redirect the location to point at 'base_loc' so a
// moving GC relocating the base does not invalidate the offset.  Callers
// must hold DerivedPointerTableGC_lock (asserted below).
void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != base_loc, "Base and derived in same location");
  if (_active) {
    assert(*derived_loc != (oop)base_loc, "location already added");
    assert(_list != NULL, "list must exist");
    intptr_t offset = value_of_loc(derived_loc) - value_of_loc(base_loc);
    // This assert is invalid because derived pointers can be
    // arbitrarily far away from their base.
    // assert(offset >= -1000000, "wrong derived pointer info");

    if (TraceDerivedPointers) {
      tty->print_cr(
        "Add derived pointer@" INTPTR_FORMAT
        " - Derived: " INTPTR_FORMAT
        " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
      );
    }
    // Set derived oop location to point to base.
    *derived_loc = (oop)base_loc;
    assert_lock_strong(DerivedPointerTableGC_lock);
    DerivedPointerEntry *entry = new DerivedPointerEntry(derived_loc, offset);
    _list->append(entry);
  }
}
758
759
// After GC has moved objects, recompute each recorded derived pointer as
// (relocated base + saved offset), then free all entries, clear the list
// and deactivate the table.
void DerivedPointerTable::update_pointers() {
  assert(_list != NULL, "list must exist");
  for(int i = 0; i < _list->length(); i++) {
    DerivedPointerEntry* entry = _list->at(i);
    oop* derived_loc = entry->location();
    intptr_t offset  = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **(oop**)derived_loc;
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    *derived_loc = (oop)(((address)base) + offset);
    assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
    }

    // Delete entry
    delete entry;
    _list->at_put(i, NULL);
  }
  // Clear list, so it is ready for next traversal (this is an invariant)
  if (TraceDerivedPointers && !_list->is_empty()) {
    tty->print_cr("--------------------------");
  }
  _list->clear();
  _active = false;
}
790
791 #endif // COMPILER2_OR_JVMCI
792