/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/heapShared.hpp"
#include "classfile/resolutionErrors.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "interpreter/rewriter.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.hpp"
#include "oops/constantPool.inline.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"

// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  _f1 = NULL;
  _f2 = _flags = 0;
  assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::verify_just_initialized(bool f2_used) {
  assert((_indices & (~cp_index_mask)) == 0, "sanity");
  assert(_f1 == NULL, "sanity");
  assert(_flags == 0, "sanity");
  if (!f2_used) {
    assert(_f2 == 0, "sanity");
  }
}

void ConstantPoolCacheEntry::reinitialize(bool f2_used) {
  _indices &= cp_index_mask;
  _f1 = NULL;
  _flags = 0;
  if (!f2_used) {
    _f2 = 0;
  }
}

int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f);
}

void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}

// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  Atomic::release_store(&_f1, f1);
}

void ConstantPoolCacheEntry::set_indy_resolution_failed() {
  Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
}

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       Klass* field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder);
  set_f2(field_offset);
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value.  Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    intx newflags = (value & parameter_size_mask);
    Atomic::cmpxchg(&_flags, (intx)0, newflags);
  }
  guarantee(parameter_size() == value,
            "size must not change: parameter_size=%d, value=%d", parameter_size(), value);
}

void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
                                                       const methodHandle& method,
                                                       int vtable_index,
                                                       bool sender_is_interface) {
  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;
  InstanceKlass* holder = NULL;  // have to declare this outside the switch
  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      holder = method->method_holder();
      // check for private interface method invocations
      if (vtable_index == Method::nonvirtual_vtable_index && holder->is_interface()) {
        assert(method->is_private(), "unexpected non-private method");
        assert(method->can_be_statically_bound(), "unexpected non-statically-bound method");
        // set_f2_as_vfinal_method checks if is_vfinal flag is true.
        set_method_flags(as_TosState(method->result_type()),
                         (                             1      << is_vfinal_shift) |
                         ((method->is_final_method() ? 1 : 0) << is_final_shift),
                         method()->size_of_parameters());
        set_f2_as_vfinal_method(method());
        byte_no = 2;
        set_f1(holder); // interface klass*
        break;
      }
      else {
        // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
        // instruction links to a non-interface method (in Object). This can happen when
        // an interface redeclares an Object method (like CharSequence declaring toString())
        // or when invokeinterface is used explicitly.
        // In that case, the method has no itable index and must be invoked as a virtual.
        // Set a flag to keep track of this corner case.
        assert(holder->is_interface() || holder == vmClasses::Object_klass(), "unexpected holder class");
        assert(method->is_public(), "Calling non-public method in Object with invokeinterface");
        change_to_virtual = true;

        // ...and fall through as if we were handling invokevirtual:
      }
    case Bytecodes::_invokevirtual:
      {
        if (!is_vtable_call) {
          assert(method->can_be_statically_bound(), "");
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          assert(!method->can_be_statically_bound(), "");
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      assert(!is_vtable_call, "");
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    bool do_resolve = true;
    // Don't mark an invokespecial to the method as resolved if the sender is an interface.
    // The receiver must be checked on every execution of this bytecode to verify that it
    // is a subclass of the current class.
    if (invoke_code == Bytecodes::_invokespecial && sender_is_interface &&
        method->name() != vmSymbols::object_initializer_name()) {
      do_resolve = false;
    }
    if (invoke_code == Bytecodes::_invokestatic) {
      assert(method->method_holder()->is_initialized() ||
             method->method_holder()->is_reentrant_initialization(Thread::current()),
             "invalid class initialization state for invoke_static");

      if (!VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
        // Don't mark an invokestatic to the method as resolved if the holder class has not
        // yet completed initialization. An invokestatic must only proceed once the class is
        // initialized; if we mark it resolved before then, that class initialization check
        // is skipped.
        //
        // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
        // the template interpreter performs the class initialization check for invokestatic
        // itself, so call site re-resolution is not needed to enforce the class
        // initialization barrier.
        do_resolve = false;
      }
    }
    if (do_resolve) {
      set_bytecode_1(invoke_code);
    }
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We do not set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.  In this
      // case, the method gets reresolved with caller for each interface call
      // because the actual selected method may not be public.
      //
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual ||
             (invoke_code == Bytecodes::_invokeinterface &&
              ((method->is_private() ||
                (method->is_final() && method->method_holder() == vmClasses::Object_klass())))),
             "unexpected invocation mode");
      if (invoke_code == Bytecodes::_invokeinterface &&
          (method->is_private() || method->is_final())) {
        // We set bytecode_1() to _invokeinterface, because that is the
        // bytecode # used by the interpreter to see if it is resolved.
        // We set bytecode_2() to _invokevirtual.
        set_bytecode_1(invoke_code);
      }
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}

void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, const methodHandle& method,
                                             bool sender_is_interface) {
  int index = Method::nonvirtual_vtable_index;
  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface);
}

void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) {
  // either the method is a miranda or its holder should accept the given index
  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
  set_direct_or_vtable_call(invoke_code, method, index, false);
}

void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code,
                                             Klass* referenced_klass,
                                             const methodHandle& method, int index) {
  assert(method->method_holder()->verify_itable_index(index), "");
  assert(invoke_code == Bytecodes::_invokeinterface, "");
  InstanceKlass* interf = method->method_holder();
  assert(interf->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(referenced_klass);
  set_f2((intx)method());
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


void ConstantPoolCacheEntry::set_method_handle(const constantPoolHandle& cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info);
}

void ConstantPoolCacheEntry::set_dynamic_call(const constantPoolHandle& cpool, const CallInfo &call_info) {
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info);
}

void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool,
                                                      Bytecodes::Code invoke_code,
                                                      const CallInfo &call_info) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and releases
  // the lock, so that when the losing writer returns, it can use the linked
  // cache entry.

  JavaThread* current = JavaThread::current();
  objArrayHandle resolved_references(current, cpool->resolved_references());
  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, current);
  if (!is_f1_null()) {
    return;
  }

  if (indy_resolution_failed()) {
    // Before we got here, another thread got a LinkageError exception during
    // resolution.  Ignore our success and throw their exception.
    ConstantPoolCache* cpCache = cpool->cache();
    int index = -1;
    for (int i = 0; i < cpCache->length(); i++) {
      if (cpCache->entry_at(i) == this) {
        index = i;
        break;
      }
    }
    guarantee(index >= 0, "Didn't find cpCache entry!");
    int encoded_index = ResolutionErrorTable::encode_cpcache_index(
                          ConstantPool::encode_invokedynamic_index(index));
    JavaThread* THREAD = JavaThread::current(); // For exception macros.
    ConstantPool::throw_resolution_error(cpool, encoded_index, THREAD);
    return;
  }

  Method* adapter            = call_info.resolved_method();
  const Handle appendix      = call_info.resolved_appendix();
  const bool has_appendix    = appendix.not_null();

  // Write the flags.
  // MHs and indy are always sig-poly and have a local signature.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift        ) |
                   (                   1      << has_local_signature_shift ) |
                   (                   1      << is_final_shift            ),
                   adapter->size_of_parameters());

  LogStream* log_stream = NULL;
  LogStreamHandle(Debug, methodhandles, indy) lsh_indy;
  if (lsh_indy.is_enabled()) {
    ResourceMark rm;
    log_stream = &lsh_indy;
    log_stream->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method=" PTR_FORMAT " (local signature) ",
                         invoke_code,
                         p2i(appendix()),
                         (has_appendix ? "" : " (unused)"),
                         p2i(adapter));
    adapter->print_on(log_stream);
    if (has_appendix)  appendix()->print_on(log_stream);
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.invoke.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer Methods, while keeping type safety.
  //

  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = f2_as_index();
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
  }

  release_set_f1(adapter);  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));

  if (log_stream != NULL) {
    this->print(log_stream, 0);
  }

  assert(has_appendix == this->has_appendix(), "proper storage of appendix flag");
  assert(this->has_local_signature(), "proper storage of signature flag");
}

bool ConstantPoolCacheEntry::save_and_throw_indy_exc(
  const constantPoolHandle& cpool, int cpool_index, int index, constantTag tag, TRAPS) {

  assert(HAS_PENDING_EXCEPTION, "No exception got thrown!");
  assert(PENDING_EXCEPTION->is_a(vmClasses::LinkageError_klass()),
         "No LinkageError exception");

  // Use the resolved_references() lock for this cpCache entry.
  // resolved_references are created for all classes with Invokedynamic, MethodHandle
  // or MethodType constant pool cache entries.
  JavaThread* current = THREAD;
  objArrayHandle resolved_references(current, cpool->resolved_references());
  assert(resolved_references() != NULL,
         "a resolved_references array should have been created for this class");
  ObjectLocker ol(resolved_references, current);

  // if f1 is not null or the indy_resolution_failed flag is set then another
  // thread either succeeded in resolving the method or got a LinkageError
  // exception, before this thread was able to record its failure.  So, clear
  // this thread's exception and return false so caller can use the earlier
  // thread's result.
  if (!is_f1_null() || indy_resolution_failed()) {
    CLEAR_PENDING_EXCEPTION;
    return false;
  }

  Symbol* error = PENDING_EXCEPTION->klass()->name();
  Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION);

  SystemDictionary::add_resolution_error(cpool, index, error, message);
  set_indy_resolution_failed();
  return true;
}

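// Returns the Method* this entry currently resolves to, or NULL if the entry
// is not resolved yet.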
Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) {
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = f1_ord();
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        return f2_as_interface_method();
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      default:
        break;
      }
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          return klass->method_at_vtable(f2_as_index());
        }
      }
      break;
    default:
      break;
    }
  }
  return NULL;
}


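// Returns the resolved appendix argument for this entry from the resolved_references
// array, or NULL if the entry has no appendix.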
oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) {
  if (!has_appendix())
    return NULL;
  const int ref_index = f2_as_index();
  objArrayOop resolved_references = cpool->resolved_references();
  return resolved_references->obj_at(ref_index);
}


#if INCLUDE_JVMTI

void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) {
  ResourceMark rm;

  if (!(*trace_name_printed)) {
    log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name());
    *trace_name_printed = true;
  }
  log_trace(redefine, class, update, constantpool)
    ("cpc %s entry update: %s", entry_type, new_method->external_name());
}

// RedefineClasses() API support:
// If this ConstantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
       Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      log_adjust("vfinal", old_method, new_method, trace_name_printed);
    }
    return;
  }

  assert (_f1 != NULL, "should not call with uninteresting entry");

  if (!(_f1->is_method())) {
    // _f1 is a Klass* for an interface, _f2 is the method
    if (f2_as_interface_method() == old_method) {
      _f2 = (intptr_t)new_method;
      log_adjust("interface", old_method, new_method, trace_name_printed);
    }
  } else if (_f1 == old_method) {
    _f1 = new_method;
    log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed);
  }
}

// a constant pool cache entry should never contain old or obsolete methods
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
  Method* m = get_interesting_method_entry();
  // return false if m refers to a non-deleted old or obsolete method
  if (m != NULL) {
    assert(m->is_valid() && m->is_method(), "m is a valid method");
    return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
  } else {
    return true;
  }
}

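// Returns the Method* referenced by this entry, or NULL for field entries and for
// virtual entries that hold a vtable index rather than a Method*.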
Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return NULL;
  }
  Method* m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return NULL;
  } else {
    if (!(_f1->is_method())) {
      // _f1 is a Klass* for an interface
      m = f2_as_interface_method();
    } else {
      m = f1_as_method();
    }
  }
  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method()) {
    return NULL;
  }
  return m;
}
#endif // INCLUDE_JVMTI

void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr("                 -------------");
  // print entry
  st->print("%3d  (" PTR_FORMAT ")  ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(),
               constant_pool_index());
  st->print_cr("                 [   " PTR_FORMAT "]", (intptr_t)_f1);
  st->print_cr("                 [   " PTR_FORMAT "]", (intptr_t)_f2);
  st->print_cr("                 [   " PTR_FORMAT "]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

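// Allocates a cache sized for both the regular entries and the invokedynamic
// entries appended by the rewriter.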
ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
                                     const intStack& index_map,
                                     const intStack& invokedynamic_index_map,
                                     const intStack& invokedynamic_map, TRAPS) {

  const int length = index_map.length() + invokedynamic_index_map.length();
  int size = ConstantPoolCache::size(length);

  return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD)
    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
}

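// Initializes the entries from the rewriter's inverse index maps: regular entries
// come first, invokedynamic entries are appended, and resolved-reference indices
// are recorded for the entries that need them.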
void ConstantPoolCache::initialize(const intArray& inverse_index_map,
                                   const intArray& invokedynamic_inverse_index_map,
                                   const intArray& invokedynamic_references_map) {
  for (int i = 0; i < inverse_index_map.length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map.at(i);
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }

  // Append invokedynamic entries at the end
  int invokedynamic_offset = inverse_index_map.length();
  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
    int offset = i + invokedynamic_offset;
    ConstantPoolCacheEntry* e = entry_at(offset);
    int original_index = invokedynamic_inverse_index_map.at(i);
    e->initialize_entry(original_index);
    assert(entry_at(offset) == e, "sanity");
  }

  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map.at(ref);
    if (cpci >= 0) {
      entry_at(cpci)->initialize_resolved_reference_index(ref);
    }
  }
}

void ConstantPoolCache::verify_just_initialized() {
  DEBUG_ONLY(walk_entries_for_initialization(/*check_only = */ true));
}

void ConstantPoolCache::remove_unshareable_info() {
  walk_entries_for_initialization(/*check_only = */ false);
}

void ConstantPoolCache::walk_entries_for_initialization(bool check_only) {
  Arguments::assert_is_dumping_archive();
  // When dumping the archive, we want to clean up the ConstantPoolCache
  // to remove any effect of linking due to the execution of Java code --
  // each ConstantPoolCacheEntry will have the same contents as if
  // ConstantPoolCache::initialize has just returned:
  //
  // - We keep the ConstantPoolCache::constant_pool_index() bits for all entries.
  // - We keep the "f2" field for entries used by invokedynamic and invokehandle
  // - All other bits in the entries are cleared to zero.
  ResourceMark rm;

  InstanceKlass* ik = constant_pool()->pool_holder();
  bool* f2_used = NEW_RESOURCE_ARRAY(bool, length());
  memset(f2_used, 0, sizeof(bool) * length());

  Thread* current = Thread::current();

  // Find all the slots that we need to preserve f2
  for (int i = 0; i < ik->methods()->length(); i++) {
    Method* m = ik->methods()->at(i);
    RawBytecodeStream bcs(methodHandle(current, m));
    while (!bcs.is_last_bytecode()) {
      Bytecodes::Code opcode = bcs.raw_next();
      switch (opcode) {
      case Bytecodes::_invokedynamic: {
          int index = Bytes::get_native_u4(bcs.bcp() + 1);
          int cp_cache_index = constant_pool()->invokedynamic_cp_cache_index(index);
          f2_used[cp_cache_index] = 1;
        }
        break;
      case Bytecodes::_invokehandle: {
          int cp_cache_index = Bytes::get_native_u2(bcs.bcp() + 1);
          f2_used[cp_cache_index] = 1;
        }
        break;
      default:
        break;
      }
    }
  }

  if (check_only) {
    DEBUG_ONLY(
      for (int i=0; i<length(); i++) {
        entry_at(i)->verify_just_initialized(f2_used[i]);
      })
  } else {
    for (int i=0; i<length(); i++) {
      entry_at(i)->reinitialize(f2_used[i]);
    }
  }
}

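// Releases the resolved_references handle and frees the reference map when the
// holder class's metadata is deallocated.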
void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
  assert(!is_shared(), "shared caches are not deallocated");
  data->remove_handle(_resolved_references);
  set_resolved_references(OopHandle());
  MetadataFactory::free_array<u2>(data, _reference_map);
  set_reference_map(NULL);
}

#if INCLUDE_CDS_JAVA_HEAP
oop ConstantPoolCache::archived_references() {
  if (_archived_references_index < 0) {
    return NULL;
  }
  return HeapShared::get_root(_archived_references_index);
}

void ConstantPoolCache::clear_archived_references() {
  if (_archived_references_index >= 0) {
    HeapShared::clear_root(_archived_references_index);
    _archived_references_index = -1;
  }
}

void ConstantPoolCache::set_archived_references(oop o) {
  assert(DumpSharedSpaces, "called only during dump time");
  _archived_references_index = HeapShared::append_root(o);
}
#endif

#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) {
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* entry = entry_at(i);
    Method* old_method = entry->get_interesting_method_entry();
    if (old_method == NULL || !old_method->is_old()) {
      continue; // skip uninteresting entries
    }
    if (old_method->is_deleted()) {
      // clean up entries with deleted methods
      entry->initialize_entry(entry->constant_pool_index());
      continue;
    }
    Method* new_method = old_method->get_new_method();
    entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
  }
}

// the constant pool cache should never contain old or obsolete methods
bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
  ResourceMark rm;
  for (int i = 1; i < length(); i++) {
    Method* m = entry_at(i)->get_interesting_method_entry();
    if (m != NULL && !entry_at(i)->check_no_old_or_obsolete_entries()) {
      log_trace(redefine, class, update, constantpool)
        ("cpcache check found old method entry: class: %s, old: %d, obsolete: %d, method: %s",
         constant_pool()->pool_holder()->external_name(), m->is_old(), m->is_obsolete(), m->external_name());
      return false;
    }
  }
  return true;
}

void ConstantPoolCache::dump_cache() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->get_interesting_method_entry() != NULL) {
      entry_at(i)->print(tty, i);
    }
  }
}
#endif // INCLUDE_JVMTI

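// Iterates over the metaspace pointers embedded in this cache so they can be
// relocated by the CDS archiving code.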
void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(ConstantPoolCache): %p", this);
  it->push(&_constant_pool);
  it->push(&_reference_map);
}

// Printing

void ConstantPoolCache::print_on(outputStream* st) const {
  st->print_cr("%s", internal_name());
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
}

void ConstantPoolCache::print_value_on(outputStream* st) const {
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}


// Verification

void ConstantPoolCache::verify_on(outputStream* st) {
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
}