/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

const char* CodeBlob::compiler_name() const {
  return compilertype2name(_type);
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the offset to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
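
// Worked example (hypothetical values; CodeEntryAlignment is a VM flag and the
// CodeHeap header size is platform dependent): with header_size == 32 and
// CodeEntryAlignment == 64, align_code_offset(40) == align_up(72, 64) - 32 == 96.
// Then header_size + 96 == 128 is a multiple of CodeEntryAlignment, so code placed
// at this offset behind the CodeHeap block header starts on an aligned address.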


// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  unsigned int size = header_size;
  size += align_up(cb->total_relocation_size(), oopSize);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  size += align_up(cb->total_metadata_size(), oopSize);
  return size;
}
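
// The resulting blob layout, in the order summed above (a sketch for orientation;
// CodeBlobLayout is authoritative):
//
//   [ header | relocation info | pad to CodeEntryAlignment | content (code) | oops | metadata ]
//
// with each section rounded up to oopSize.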

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _oop_maps(oop_maps),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
  NOT_PRODUCT(COMMA _strings(CodeStrings()))
{
  assert(is_aligned(layout.size(),            oopSize), "unaligned size");
  assert(is_aligned(layout.header_size(),     oopSize), "unaligned size");
  assert(is_aligned(layout.relocation_size(), oopSize), "unaligned size");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  _type(type),
  _size(layout.size()),
  _header_size(layout.header_size()),
  _frame_complete_offset(frame_complete_offset),
  _data_offset(layout.data_offset()),
  _frame_size(frame_size),
  _code_begin(layout.code_begin()),
  _code_end(layout.code_end()),
  _content_begin(layout.content_begin()),
  _data_end(layout.data_end()),
  _relocation_begin(layout.relocation_begin()),
  _relocation_end(layout.relocation_end()),
  _caller_must_gc_arguments(caller_must_gc_arguments),
  _name(name)
  NOT_PRODUCT(COMMA _strings(CodeStrings()))
{
  assert(is_aligned(_size,        oopSize), "unaligned size");
  assert(is_aligned(_header_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small");
  assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");

  set_oop_maps(oop_maps);
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
  S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


// Creates a simple CodeBlob. Sets up the size of the different regions.
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
  assert(is_aligned(locs_size, oopSize), "unaligned size");
}


// Creates a RuntimeBlob from a CodeBuffer
// and copies code and relocation info.
RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBuffer* cb,
  int         header_size,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  cb->copy_code_and_locs_to(this);
}

void CodeBlob::flush() {
  FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
  _oop_maps = NULL;
  NOT_PRODUCT(_strings.free();)
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory; it's your job to free it.
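  // (As the code below shows, the set is built in the C heap by
  // ImmutableOopMapSet::build_from(); CodeBlob::flush() above releases it
  // via FREE_C_HEAP_ARRAY.)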
  if (p != NULL) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = NULL;
  }
}


void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");

  if (stub != NULL) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, (intptr_t) stub);
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty);
      if ((stub->oop_maps() != NULL) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
  assert(_oop_maps != NULL, "nope");
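  // Oop maps are keyed by PC offset within the code section, so translate the
  // return address into an offset from code_begin() before the lookup.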
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code() {
  ResourceMark m;
  Disassembler::decode(this, tty);
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob


BufferBlob::BufferBlob(const char* name, int size)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}

BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
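
// Usage sketch (hypothetical caller; names are illustrative only): create() can
// return NULL when the code cache is exhausted, so callers must check the result.
//
//   BufferBlob* scratch = BufferBlob::create("scratch blob", 4096);
//   if (scratch != NULL) {
//     // ... emit temporary code starting at scratch->code_begin() ...
//     BufferBlob::free(scratch);
//   }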


BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
  : RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}

BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != NULL, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, size, cb);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  assert(blob != NULL, "caller must check for NULL");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->flush();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free((RuntimeBlob*)blob);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", size, cb) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  AdapterBlob* blob = NULL;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = NULL;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != NULL, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition, doing so might also
      // deadlock when concurrent class unloading is performed. At this point in time, the
      // CompiledICLocker is taken, so we are not allowed to blockingly wait for the
      // CodeCache_lock, as these two locks are otherwise consistently taken in the opposite
      // order. Bailing out results in an IC transition to the clean state instead, which
      // will cause subsequent calls to retry the transition eventually.
      return NULL;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = NULL;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == NULL) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int         frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments)
{
  RuntimeStub* stub = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (!p) fatal("Initial size of CodeCache is too small");
  return p;
}


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", cb, sizeof(DeoptimizationBlob), size, frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls   = -1;
#endif
}
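
// A non-normative note on the unpack_* offsets: each is an entry point into the
// blob's code, selected by the deoptimization machinery depending on whether the
// frame is unpacked normally, with a pending exception, or with the top bytecode
// re-executed.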


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        unpack_offset,
  int        unpack_with_exception_offset,
  int        unpack_with_reexecution_offset,
  int        frame_size)
{
  DeoptimizationBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

#ifdef COMPILER2
UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", cb, sizeof(UncommonTrapBlob), size, frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int        frame_size)
{
  UncommonTrapBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
    blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

#ifdef COMPILER2
ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", cb, sizeof(ExceptionBlob), size, frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
    blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}


#endif // COMPILER2


//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", cb, sizeof(SafepointBlob), size, frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = NULL;
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}


//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::print_on(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != NULL) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    // the i2c/c2i adapters are generated into buffer blobs
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != NULL) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    // the InlineCacheBuffer is using stubs generated into a buffer blob
    if (InlineCacheBuffer::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != NULL) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    nm->print_nmethod(verbose);
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void RuntimeBlob::verify() {
  ShouldNotReachHere();
}

void BufferBlob::verify() {
  // unimplemented
}

void BufferBlob::print_on(outputStream* st) const {
  RuntimeBlob::print_on(st);
  print_value_on(st);
}

void BufferBlob::print_value_on(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::verify() {
  // unimplemented
}

void RuntimeStub::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::verify() {
  // unimplemented
}

void SingletonBlob::print_on(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

//----------------------------------------------------------------------------------------------------
// Implementation of OptimizedEntryBlob

OptimizedEntryBlob::OptimizedEntryBlob(const char* name, int size, CodeBuffer* cb, intptr_t exception_handler_offset,
                                       jobject receiver, ByteSize frame_data_offset) :
  BufferBlob(name, size, cb),
  _exception_handler_offset(exception_handler_offset),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset) {
  CodeCache::commit(this);
}

OptimizedEntryBlob* OptimizedEntryBlob::create(const char* name, CodeBuffer* cb, intptr_t exception_handler_offset,
                                               jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  OptimizedEntryBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(OptimizedEntryBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) OptimizedEntryBlob(name, size, cb, exception_handler_offset, receiver, frame_data_offset);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void OptimizedEntryBlob::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* OptimizedEntryBlob::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}