/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#ifndef USDT2
HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#endif /* USDT2 */
#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is NULL, the previous enter was a recursive enter and this exit is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine handles the interpreter/compiler slow case.
// There is no need to attempt the fast path here, because it must already
// have failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine handles the interpreter/compiler slow case.
// There is no need to attempt the fast path here, because it must already
// have failed in the interpreter/compiler code. Simply using the heavy-
// weight monitor should be fine, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
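//
// An illustrative sketch of steps 1 and 4 above (disabled; not part of the
// original implementation). 'outer' is a hypothetical Handle that the
// current thread is assumed to have locked:
#if 0
  intptr_t recursions = ObjectSynchronizer::complete_exit(outer, THREAD);
  // ... wait on the inner lock here (steps 2, 3 and 5) ...
  ObjectSynchronizer::reenter(outer, recursions, THREAD);
#endif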
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741.  Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment for notify() above
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
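//
// A disabled illustration of that policy (not part of the original code):
// 'volatile' only constrains the compiler; the explicit fence is what
// constrains the hardware.
#if 0
static volatile int PublishFlag = 0 ;   // volatile: compiler may not cache or elide accesses
static void publish_example() {
  PublishFlag = 1 ;                     // publish
  OrderAccess::fence() ;                // order the store ahead of subsequent loads
}
#endif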

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy. A more complete solution would require that the inflating
         // thread hold the associated inflation lock. The following code simply restricts
         // the number of spinners to at most one. We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer. Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
         // and calling park(). When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//

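// One hypothetical diffusion step (illustration only; this file does not use
// it): multiply the address bits by a large odd constant, then mix with an
// xor-shift before folding in stwRandom.
#if 0
  intptr_t v = cast_from_oop<intptr_t>(obj) >> 3 ;
  v = (v * 2654435761u) ^ (v >> 16) ;   // spread the middle address bits
  value = v ^ GVars.stwRandom ;
#endif
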
static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same value.
     // On MP systems we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations. This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = cast_from_oop<intptr_t>(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
//
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more fast-path code
    // here, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();                  // by current thread, check if the displaced
    if (hash) {                           // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// It queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}
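
// A disabled, caller-side sketch of the ownership query above (illustration
// only, not part of the original file; 'h_obj' is a hypothetical Handle for
// the object being examined):
#if 0
  ObjectSynchronizer::LockOwnership who =
      ObjectSynchronizer::query_lock_ownership(JavaThread::current(), h_obj);
  if (who == ObjectSynchronizer::owner_self) {
    // the current thread holds the lock (stack-locked or inflated)
  }
#endif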

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block =
    (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* block =
    (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (ObjectMonitor *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.
//


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//

static void InduceScavenge (Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op
    // when it has completed.
    VMThread::execute (new VM_ForceAsyncSafepoint()) ;

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
  }
}
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs. As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors. Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           if (MonitorInUseLists) {
             m->FreeNext = Self->omInUseList;
             Self->omInUseList = m;
             Self->omInUseCount ++;
             // verifyInUse(Self);
           } else {
             m->FreeNext = NULL;
           }
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                MonitorFreeCount --;
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take, false) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;

            const int mx = MonitorBound ;
            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
              // We can't safely induce a STW safepoint from omAlloc() as our thread
              // state may not be appropriate for such activities and callers may hold
              // naked oops, so instead we defer the action.
              InduceScavenge (Self, "omAlloc") ;
            }
            continue;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                                   "Allocate ObjectMonitors");
        }

        // Format the block.
        // Initialize the linked list: each monitor points to its next,
        // forming a singly linked free list. The very first monitor
        // points to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered. A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand. This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
        MonitorPopulation += _BLOCKSIZE-1;
        MonitorFreeCount += _BLOCKSIZE-1;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        // There are lock-free uses of gBlockList so make sure that
        // the previous stores happen before we update gBlockList.
        OrderAccess::release_store_ptr(&gBlockList, temp);

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//

void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
    guarantee (m->object() == NULL, "invariant") ;

    // Remove from omInUseList
    if (MonitorInUseLists && fromPerThreadAlloc) {
      ObjectMonitor* curmidinuse = NULL;
      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
       if (m == mid) {
         // extract from per-thread in-use-list
         if (mid == Self->omInUseList) {
           Self->omInUseList = mid->FreeNext;
         } else if (curmidinuse != NULL) {
           curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
         }
         Self->omInUseCount --;
         // verifyInUse(Self);
         break;
       } else {
         curmidinuse = mid;
         mid = mid->FreeNext;
       }
     }
    }

    // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator. Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush (Thread * Self) {
    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
    Self->omFreeList = NULL ;
    ObjectMonitor * Tail = NULL ;
    int Tally = 0;
    if (List != NULL) {
      ObjectMonitor * s ;
      for (s = List ; s != NULL ; s = s->FreeNext) {
          Tally ++ ;
          Tail = s ;
          guarantee (s->object() == NULL, "invariant") ;
          guarantee (!s->is_busy(), "invariant") ;
          s->set_owner (NULL) ;   // redundant but good hygiene
          TEVENT (omFlush - Move one) ;
      }
      guarantee (Tail != NULL && List != NULL, "invariant") ;
    }

    ObjectMonitor * InUseList = Self->omInUseList;
    ObjectMonitor * InUseTail = NULL ;
    int InUseTally = 0;
    if (InUseList != NULL) {
      Self->omInUseList = NULL;
      ObjectMonitor *curom;
      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
        InUseTail = curom;
        InUseTally++;
      }
      // TODO debug
      assert(Self->omInUseCount == InUseTally, "inuse count off");
      Self->omInUseCount = 0;
      guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
    }

    Thread::muxAcquire (&ListLock, "omFlush") ;
    if (Tail != NULL) {
      Tail->FreeNext = gFreeList ;
      gFreeList = List ;
      MonitorFreeCount += Tally;
    }

    if (InUseTail != NULL) {
      InUseTail->FreeNext = gOmInUseList;
      gOmInUseList = InUseList;
      gOmInUseCount += InUseTally;
    }

    Thread::muxRelease (&ListLock) ;
    TEVENT (omFlush) ;
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}


// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line. Padding might be appropriate.


ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self,
                                                  oop object,
                                                  const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  EventJavaMonitorInflate event;

  for (;;) {
      const markOop mark = object->mark() ;
      assert (!mark->has_bias_pattern(), "invariant") ;

      // The mark can be in one of the following states:
      // *  Inflated     - just return
      // *  Stack-locked - coerce it to inflated
      // *  INFLATING    - busy wait for conversion to complete
      // *  Neutral      - aggressively inflate the object.
      // *  BIASED       - Illegal. We should never see this

      // CASE: inflated
      if (mark->has_monitor()) {
          ObjectMonitor * inf = mark->monitor() ;
          assert (inf->header()->is_neutral(), "invariant");
          assert (inf->object() == object, "invariant") ;
          assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
          return inf ;
      }

      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markOopDesc::INFLATING()) {
         TEVENT (Inflate: spin while INFLATING) ;
         ReadStableMark(object) ;
         continue ;
      }

      // CASE: stack-locked
      // Could be stack-locked either by this thread or by some other thread.
      //
      // Note that we allocate the objectmonitor speculatively, _before_ attempting
      // to install INFLATING into the mark word. We originally installed INFLATING,
      // allocated the objectmonitor, and then finally STed the address of the
      // objectmonitor into the mark. This was correct, but artificially lengthened
      // the interval in which INFLATED appeared in the mark, thus increasing
      // the odds of inflation contention.
      //
      // We now use per-thread private objectmonitor free lists.
      // These lists are reprovisioned from the global free list outside the
      // critical INFLATING...ST interval. A thread can transfer
      // multiple objectmonitors en masse from the global free list to its local free list.
      // This reduces coherency traffic and lock contention on the global free list.
      // Using such local free lists, it doesn't matter if the omAlloc() call appears
      // before or after the CAS(INFLATING) operation.
      // See the comments in omAlloc().

      if (mark->has_locker()) {
          ObjectMonitor * m = omAlloc (Self) ;
          // Optimistically prepare the objectmonitor - anticipate successful CAS
          // We do this before the CAS in order to minimize the length of time
          // in which INFLATING appears in the mark.
          m->Recycle();
          m->_Responsible  = NULL ;
          m->OwnerIsThread = 0 ;
          m->_recursions   = 0 ;
          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class

          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
          if (cmp != mark) {
             omRelease (Self, m, true) ;
             continue ;       // Interference -- just retry
          }

          // We've successfully installed INFLATING (0) into the mark-word.
          // This is the only case where 0 will appear in a mark-word.
          // Only the singular thread that successfully swings the mark-word
          // to 0 can perform (or more precisely, complete) inflation.
          //
          // Why do we CAS a 0 into the mark-word instead of just CASing the
          // mark-word from the stack-locked value directly to the new inflated state?
          // Consider what happens when a thread unlocks a stack-locked object.
          // It attempts to use CAS to swing the displaced header value from the
          // on-stack basiclock back into the object header. Recall also that the
          // header value (hashcode, etc) can reside in (a) the object header, or
          // (b) a displaced header associated with the stack-lock, or (c) a displaced
          // header in an objectMonitor. The inflate() routine must copy the header
          // value from the basiclock on the owner's stack to the objectMonitor, all
          // the while preserving the hashCode stability invariants. If the owner
          // decides to release the lock while the value is 0, the unlock will fail
          // and control will eventually pass from slow_exit() to inflate. The owner
          // will then spin, waiting for the 0 value to disappear. Put another way,
          // the 0 causes the owner to stall if the owner happens to try to
          // drop the lock (restoring the header from the basiclock to the object)
          // while inflation is in-progress. This protocol avoids races that
          // would otherwise permit hashCode values to change or "flicker" for an object.
          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
          // 0 serves as a "BUSY" inflate-in-progress indicator.


      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self and
      // m->OwnerIsThread = 1. Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful; an illustrative sketch follows inflate() below.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->OwnerIsThread = 1;
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->OwnerIsThread = 0;
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}
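
// Illustrative sketch of the inflateTry() idea mentioned in the CASE: neutral
// comment above. This is a hypothetical helper, not part of the current
// implementation; it assumes omAlloc()/omRelease() are accessible from here
// and handles only the neutral case. Pre-setting _owner before publication
// means a single successful CAS both inflates the object and confers
// ownership on the inflating thread.
static ObjectMonitor * inflateTry_sketch (Thread * Self, oop object) {
  markOop mark = object->mark();
  if (!mark->is_neutral()) return NULL;    // other cases: fall back to inflate()
  ObjectMonitor * m = ObjectSynchronizer::omAlloc(Self);
  m->Recycle();
  m->set_header(mark);                     // preserve hash/age bits from the mark
  m->set_object(object);
  m->set_owner(Self);                      // pre-locked: publish as already owned
  m->OwnerIsThread = 1;
  if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
    // Interference -- scrub and release the monitor; caller retries or inflates.
    m->set_object(NULL);
    m->set_owner(NULL);
    m->OwnerIsThread = 0;
    m->Recycle();
    ObjectSynchronizer::omRelease(Self, m, true);
    return NULL;
  }
  return m;                                // inflated *and* owned by Self
}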

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same cache line. Padding might be appropriate;
// a sketch of the idea follows.
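
// A minimal illustration of that padding idea (hypothetical type, not used by
// this file). The filler keeps adjacent monitors on distinct cache lines;
// DEFAULT_CACHE_LINE_SIZE comes from the shared globalDefinitions code.
struct PaddedObjectMonitor_sketch {
  ObjectMonitor _monitor;
  char          _pad [DEFAULT_CACHE_LINE_SIZE];   // separate neighbors in the cache
};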


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock while scanning gOmInUseList, moving non-busy monitors
// to the global free list.
// An alternative could have used a single global in-use list. The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
//

enum ManifestConstants {
  ClearResponsibleAtSTW    = 0,
  MaximumRecheckInterval   = 1000
};

// Deflate a single monitor if not in use
// Return true if deflated, false if in use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by FreeHeadp/FreeTailp.
    if (*FreeHeadp == NULL) *FreeHeadp = mid;
    if (*FreeTailp != NULL) {
      ObjectMonitor * prevtail = *FreeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
      prevtail->FreeNext = mid;
    }
    *FreeTailp = mid;
    deflated = true;
  }
  return deflated;
}
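
// Worked example of the free-list discipline above (illustrative): deflating
// monitors m1 then m2 into an initially empty working list -- with the caller
// clearing each new tail's FreeNext, as walk_monitor_list() below does -- yields
//   *FreeHeadp == m1, m1->FreeNext == m2, *FreeTailp == m2, m2->FreeNext == NULL
// which is exactly the shape the constant-time splice in deflate_idle_monitors()
// expects.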

// Caller acquires ListLock
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;
  int deflatedcount = 0;

  for (mid = *listheadp; mid != NULL; ) {
    oop obj = (oop) mid->object();
    bool deflated = false;
    if (obj != NULL) {
      deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
    }
    if (deflated) {
      // extract from the per-thread in-use list
      if (mid == *listheadp) {
        *listheadp = mid->FreeNext;
      } else if (curmidinuse != NULL) {
        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the FreeHeadp list
      mid = next;
      deflatedcount++;
    } else {
      curmidinuse = mid;
      mid = mid->FreeNext;
    }
  }
  return deflatedcount;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * FreeHead = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation
  // And in case the VM thread is acquiring a lock during a safepoint
  // See e.g. 6320749
  Thread::muxAcquire(&ListLock, "scavenge - return");

  if (MonitorInUseLists) {
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount -= deflatedcount;
      // verifyInUse(cur);
      nScavenged += deflatedcount;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount -= deflatedcount;
      nScavenged += deflatedcount;
      nInuse += gOmInUseCount;
    }

  } else {
    ObjectMonitor* block =
      (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (ObjectMonitor*)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  MonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
             MonitorPopulation, MonitorFreeCount);
    ::fflush(stdout);
  }

  ForceMonitorScavenge = 0;  // Reset

  // Move the scavenged monitors back to the global free list.
  if (FreeHead != NULL) {
    guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
    assert(FreeTail->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    FreeTail->FreeNext = gFreeList;
    gFreeList = FreeHead;
  }
  Thread::muxRelease(&ListLock);

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
  if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
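//
// Expanded sketch of that idiom (illustrative; assumes the safepoint counter
// is readable here -- the raw _safepoint_counter field is an implementation
// detail). The counter is odd while a safepoint is in progress, so
// "unchanged and even" certifies that no safepoint ran in between:
//   long tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at a safepoint>
//   guarantee (((tmp ^ SafepointSynchronize::_safepoint_counter) | (tmp & 1)) == 0,
//              "invariant: code above must not run across a safepoint") ;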
void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line, which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line, which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line, which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  ObjectMonitor* block =
    (ObjectMonitor *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  ObjectMonitor* block =
    (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(ObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    block = (ObjectMonitor*)block->FreeNext;
  }
  return 0;
}
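
// Typical (illustrative) use is in assertions guarding pointer provenance, e.g.
//   assert(ObjectSynchronizer::verify_objmon_isinpool(mid),
//          "monitor is not in the global block list");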

#endif
