1 /*
2  * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
26 #define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
27 
28 #include "memory/padded.hpp"
29 #include "oops/markOop.hpp"
30 #include "runtime/basicLock.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/perfData.hpp"
33 
34 class ObjectMonitor;
35 class ThreadsList;
36 
// Statistics gathered while deflating idle ObjectMonitors; filled in by
// ObjectSynchronizer::deflate_idle_monitors() and friends (see below) and
// reported by finish_deflate_idle_monitors().
struct DeflateMonitorCounters {
  int nInuse;             // currently associated with objects
  int nInCirculation;     // extant
  int nScavenged;         // reclaimed
  double perThreadTimes;  // per-thread scavenge times
                          // (cumulative across threads; units presumably
                          // seconds — confirm in synchronizer.cpp)
};
43 
// ObjectSynchronizer is the VM-internal entry point for Java object
// monitor operations (monitorenter/monitorexit, wait/notify, identity
// hash codes) and for managing the pool of heavy-weight ObjectMonitors.
// All members are static (AllStatic).
class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
 public:
  // Result of query_lock_ownership(): who (if anyone) owns the lock
  // relative to the querying thread.
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  // Reason a stack-lock was inflated to a heavy-weight ObjectMonitor;
  // used for diagnostics/JFR (see inflate_cause_name()).
  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // This is the full version of monitor enter and exit. We choose not
  // to use enter() and exit() in order to make sure users are aware
  // of the performance and semantics difference. They are normally
  // used by ObjectLocker etc. The interpreter and compiler use
  // assembly copies of these routines. Please keep them synchronized.
  //
  // attempt_rebias flag is used by UseBiasedLocking implementation
  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias,
                         TRAPS);
  static void fast_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: They are ONLY used to handle the slow cases. They should
  // only be used when the fast cases failed. Use of these functions
  // without previous fast case check may cause fatal error.
  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit(oop obj, BasicLock* lock, Thread* THREAD);

  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases
  static int  wait(Handle obj, jlong millis, TRAPS);
  static void notify(Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Fast-path variants; presumably return true when the operation was
  // completed without falling back to the slow path — confirm against
  // the definitions in synchronizer.cpp.
  static bool quick_notify(oopDesc* obj, Thread* Self, bool All);
  static bool quick_enter(oop obj, Thread* Self, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedException.
  static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);

  // used by classloading to free classloader object lock,
  // wait on an internal lock, and reclaim original lock
  // with original recursion count
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void reenter (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  static ObjectMonitor * omAlloc(Thread * Self);
  static void omRelease(Thread * Self, ObjectMonitor * m,
                        bool FromPerThreadAlloc);
  static void omFlush(Thread * Self);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread * Self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static ObjectMonitor* inflate_helper(oop obj);
  // Human-readable name of an InflateCause, for diagnostics.
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread * Self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);

  // Returns the JavaThread (from t_list) that owns h_obj's monitor, or
  // NULL semantics as defined in synchronizer.cpp.
  static JavaThread* get_lock_owner(ThreadsList * t_list, Handle h_obj);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy
  // Basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed
  static void deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters);
  static void prepare_deflate_idle_monitors(DeflateMonitorCounters* counters);
  static void finish_deflate_idle_monitors(DeflateMonitorCounters* counters);

  // For a given monitor list: global or per-thread, deflate idle monitors
  static int deflate_monitor_list(ObjectMonitor** listheadp,
                                  ObjectMonitor** freeHeadp,
                                  ObjectMonitor** freeTailp);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj,
                              ObjectMonitor** freeHeadp,
                              ObjectMonitor** freeTailp);
  static bool is_cleanup_needed();
  static void oops_do(OopClosure* f);
  // Process oops in thread local used monitors
  static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

  // debugging
  static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

 private:
  friend class SynchronizerTest;

  // Number of ObjectMonitors per allocated block on gBlockList.
  enum { _BLOCKSIZE = 128 };
  // global list of blocks of monitors
  static PaddedEnd<ObjectMonitor> * volatile gBlockList;
  // global monitor free list
  static ObjectMonitor * volatile gFreeList;
  // global monitor in-use list, for moribund threads,
  // monitors they inflated need to be scanned for deflation
  static ObjectMonitor * volatile gOmInUseList;
  // count of entries in gOmInUseList
  static int gOmInUseCount;

  // Process oops in all global used monitors (i.e. moribund thread's monitors)
  static void global_used_oops_do(OopClosure* f);
  // Process oops in monitors on the given list
  static void list_oops_do(ObjectMonitor* list, OopClosure* f);

  // Support for SynchronizerTest access to GVars fields:
  static u_char* get_gvars_addr();
  static u_char* get_gvars_hcSequence_addr();
  static size_t get_gvars_size();
  static u_char* get_gvars_stwRandom_addr();
};
183 
// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// Scoped (StackObj) RAII locker: the constructor acquires the monitor on
// _obj (unless doLock is false) and the destructor releases it, so locking
// stays balanced even on early returns. The wait/notify helpers below use
// the CHECK macro, so a pending exception propagates to the caller.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;
  Handle    _obj;
  BasicLock _lock;
  bool      _dolock;   // default true
 public:
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait(TRAPS)  { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS)  { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up lock completely, returning recursion count
  // reenter reclaims lock with original recursion count
  intptr_t complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS)  { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};
208 
209 #endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
210