/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_RUNTIME_SYNCHRONIZER_HPP

#include "memory/padded.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"

class ObjectMonitor;
class ThreadsList;

#ifndef OM_CACHE_LINE_SIZE
// Use DEFAULT_CACHE_LINE_SIZE if not already specified for
// the current build platform.
#define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
#endif

typedef PaddedEnd<ObjectMonitor, OM_CACHE_LINE_SIZE> PaddedObjectMonitor;

struct DeflateMonitorCounters {
  volatile int n_in_use;              // currently associated with objects
  volatile int n_in_circulation;      // extant
  volatile int n_scavenged;           // reclaimed (global and per-thread)
  volatile int per_thread_scavenged;  // per-thread scavenge total
           double per_thread_times;   // per-thread scavenge times
};

class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the "slow path" version of monitor enter and exit.
  static void enter(Handle obj, BasicLock* lock, TRAPS);
  static void exit(oop obj, BasicLock* lock, Thread* THREAD);
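
  // A minimal usage sketch (hypothetical runtime caller, not part of this API):
  // the fast path is attempted first; the slow path is taken only when the
  // object cannot be locked with a simple stack-lock operation. Assumes `obj`
  // is a Handle and THREAD is the current thread.
  //
  //   BasicLock lock;                                   // on-stack lock record
  //   if (!ObjectSynchronizer::quick_enter(obj(), THREAD, &lock)) {
  //     ObjectSynchronizer::enter(obj, &lock, THREAD);  // may inflate/block
  //   }
  //   // ... critical section ...
  //   ObjectSynchronizer::exit(obj(), &lock, THREAD);   // never blocks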

  // Used only to handle JNI locks or other unmatched monitor enter/exit
  // pairs. Internally these use the heavy-weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and JNI cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  static bool quick_notify(oopDesc* obj, Thread* self, bool All);
  static bool quick_enter(oop obj, Thread* self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExceptions.
  static void wait_uninterruptibly(Handle obj, jlong Millis, Thread* THREAD);

  // Used by class loading to release the class loader object lock,
  // wait on an internal lock, and then reclaim the original lock
  // with its original recursion count.
  static intx complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intx recursions, TRAPS);
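
  // A minimal sketch of the class loading pattern described above (hypothetical
  // caller; names other than complete_exit()/reenter() are assumptions):
  //
  //   intx recursions = ObjectSynchronizer::complete_exit(class_loader_obj, THREAD);
  //   // ... wait on an internal VM lock without holding the Java-level lock ...
  //   ObjectSynchronizer::reenter(class_loader_obj, recursions, THREAD);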

  // thread-specific and global ObjectMonitor free list accessors
  static ObjectMonitor* om_alloc(Thread* self);
  static void om_release(Thread* self, ObjectMonitor* m,
                         bool FromPerThreadAlloc);
  static void om_flush(Thread* self);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread* self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static void inflate_helper(oop obj);
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread* self, oop obj);
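
  // A minimal sketch of typical use (assumed caller, e.g. an identity-hash
  // entry point; not part of this header's contract):
  //
  //   oop obj = ...;   // object whose identity hash is needed
  //   intptr_t hash = ObjectSynchronizer::FastHashCode(THREAD, obj);
  //   // The hash is cached in the object's mark word or, once the monitor has
  //   // been inflated, in the associated ObjectMonitor's header.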

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);

  static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy:
  // basically, we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed.
  static void deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void deflate_idle_monitors_using_JT();
  static void deflate_global_idle_monitors_using_JT();
  static void deflate_per_thread_idle_monitors_using_JT(JavaThread* target);
  static void deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target);
  static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
  static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);
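
  // A minimal sketch of the safepoint-time deflation sequence (hypothetical
  // VM-operation code; the exact caller and thread iteration are assumptions):
  //
  //   DeflateMonitorCounters counters;
  //   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
  //   ObjectSynchronizer::deflate_idle_monitors(&counters);            // global list
  //   for (JavaThread* jt : /* all Java threads, mechanism assumed */)
  //     ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
  //   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);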

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** list_p,
                                  int* count_p,
                                  ObjectMonitor** free_head_p,
                                  ObjectMonitor** free_tail_p);
  // For a given in-use monitor list: global or per-thread, deflate idle
  // monitors using a JavaThread.
  static int deflate_monitor_list_using_JT(ObjectMonitor** list_p,
                                           int* count_p,
                                           ObjectMonitor** free_head_p,
                                           ObjectMonitor** free_tail_p,
                                           ObjectMonitor** saved_mid_in_use_p);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** free_head_p,
                              ObjectMonitor** free_tail_p);
  static bool deflate_monitor_using_JT(ObjectMonitor* mid,
                                       ObjectMonitor** free_head_p,
                                       ObjectMonitor** free_tail_p);
  static bool is_async_deflation_needed();
  static bool is_safepoint_deflation_needed();
  static bool is_async_deflation_requested() { return _is_async_deflation_requested; }
  static bool is_special_deflation_requested() { return _is_special_deflation_requested; }
  static void set_is_async_deflation_requested(bool new_value) { _is_async_deflation_requested = new_value; }
  static void set_is_special_deflation_requested(bool new_value) { _is_special_deflation_requested = new_value; }
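
  // A minimal sketch of how these request flags are typically used (the
  // requesting and draining threads named here are assumptions, not part of
  // this header's contract):
  //
  //   if (ObjectSynchronizer::is_async_deflation_needed()) {
  //     ObjectSynchronizer::set_is_async_deflation_requested(true);
  //     // ... notify the thread that calls deflate_idle_monitors_using_JT() ...
  //   }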
  static jlong time_since_last_async_deflation_ms();
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // debugging
  static void audit_and_print_stats(bool on_exit);
  static void chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                             outputStream * out, int *error_cnt_p);
  static void chk_global_free_list_and_count(outputStream * out,
                                             int *error_cnt_p);
  static void chk_global_wait_list_and_count(outputStream * out,
                                             int *error_cnt_p);
  static void chk_global_in_use_list_and_count(outputStream * out,
                                               int *error_cnt_p);
  static void chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                               outputStream * out, int *error_cnt_p);
  static void chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                   outputStream * out,
                                                   int *error_cnt_p);
  static void chk_per_thread_free_list_and_count(JavaThread *jt,
                                                 outputStream * out,
                                                 int *error_cnt_p);
  static void log_in_use_monitor_details(outputStream * out);
  static int  log_monitor_list_counts(outputStream * out);
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

  static void do_safepoint_work(DeflateMonitorCounters* counters);

 private:
  friend class SynchronizerTest;

  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  static PaddedObjectMonitor* g_block_list;
  static volatile bool _is_async_deflation_requested;
  static volatile bool _is_special_deflation_requested;
  static jlong         _last_async_deflation_time_ns;

  // Function to prepend new blocks to the appropriate lists:
  static void prepend_block_to_lists(PaddedObjectMonitor* new_blk);

  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

  // Support for SynchronizerTest access to GVars fields:
  static u_char* get_gvars_addr();
  static u_char* get_gvars_hc_sequence_addr();
  static size_t get_gvars_size();
  static u_char* get_gvars_stw_random_addr();
};

// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;
  Handle    _obj;
  BasicLock _lock;
  bool      _dolock;   // default true
 public:
  ObjectLocker(Handle obj, Thread* thread, bool do_lock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS)  { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS)  { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void wait_uninterruptibly(TRAPS) { ObjectSynchronizer::wait_uninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intx complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intx recursions, TRAPS)  { ObjectSynchronizer::reenter(_obj, recursions, CHECK); }
};
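
// A minimal usage sketch for ObjectLocker (hypothetical VM-internal caller;
// the Handle name is an assumption). The constructor acquires the lock and the
// destructor releases it when the StackObj goes out of scope:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);   // locks h_obj (balanced, scoped)
//     ol.notify_all(CHECK);             // java.lang.Object::notifyAll semantics
//   }                                   // lock released here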

#endif // SHARE_RUNTIME_SYNCHRONIZER_HPP