/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }
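
// For orientation: with the HOTSPOT_MONITOR_PROBE_* aliases above, a use
// such as DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr) expands
// (roughly) to a DTraceMonitorProbes-guarded call of
//   HOTSPOT_MONITOR_WAITED(jtid, (uintptr_t)(monitor), bytes, len)
// after DTRACE_MONITOR_PROBE_COMMON has computed jtid, bytes and len.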

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
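
// A hedged sketch of the intended calling pattern (illustrative only; the
// actual call sites are the interpreter/compiler runtime entry points):
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition the thread state out of _thread_in_Java, then take the
//     // slow path, e.g. ObjectSynchronizer::notify()/notifyall() on a Handle.
//   }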

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle interpreter/compiler slow case
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle interpreter/compiler slow case
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
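//
// For steps 1) and 4), the two primitives below pair up as follows
// (illustrative sketch only; h_lock1 is a hypothetical Handle, not a name
// from this file):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(h_lock1, THREAD); // 1)
//   ...                                                                // 2)-3)
//   ObjectSynchronizer::reenter(h_lock1, rec, THREAD);                 // 4)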
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
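//
// For example, under this policy a lock-free reader of a shared pointer is
// written with an explicit acquire -- as monitors_iterate() below does with
// OrderAccess::load_acquire(&gBlockList) -- rather than relying on the
// compiler or CPU to preserve the required ordering.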

struct SharedGlobals {
  char _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
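//
// For instance, the hashCode==1 variant below computes
//   addrBits = obj >> 3;  value = addrBits ^ (addrBits >> 5) ^ stwRandom;
// so that back-to-back allocations, whose addresses differ only in a few
// low-order bits, still yield well-separated hash values.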

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();                  // by current thread, check if the displaced
    if (hash) {                           // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load displaced header and check it has hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage
      // of the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}
// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// -- unassigned and on the global free list
// -- unassigned and on a thread's private omFreeList
// -- assigned to an object. The object is inflated and the mark refers
//    to the objectmonitor.

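// An informal sketch of those transitions, derived from the comments above
// (not a new state machine):
//
//   gFreeList --(omAlloc)--> thread's omFreeList --(inflation)--> assigned
//       ^                                                            |
//       +----------(deflate_idle_monitors, at STW-time)--------------+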

// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      guarantee(m->object() == NULL, "invariant");
      m->FreeNext = Self->omInUseList;
      Self->omInUseList = m;
      Self->omInUseCount++;
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
1227 // interleave with the scavenge operator. In particular, this ensures that
1228 // the thread's monitors are scanned by a GC safepoint, either via
1229 // Thread::oops_do() (if safepoint happens before omFlush()) or via
1230 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
1231 // monitors have been transferred to the global in-use list).
1232
omFlush(Thread * Self)1233 void ObjectSynchronizer::omFlush(Thread * Self) {
1234 ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL
1235 Self->omFreeList = NULL;
1236 ObjectMonitor * tail = NULL;
1237 int tally = 0;
1238 if (list != NULL) {
1239 ObjectMonitor * s;
1240 // The thread is going away, the per-thread free monitors
1241 // are freed via set_owner(NULL)
1242 // Link them to tail, which will be linked into the global free list
1243 // gFreeList below, under the gListLock
1244 for (s = list; s != NULL; s = s->FreeNext) {
1245 tally++;
1246 tail = s;
1247 guarantee(s->object() == NULL, "invariant");
1248 guarantee(!s->is_busy(), "invariant");
1249 s->set_owner(NULL); // redundant but good hygiene
1250 }
1251 guarantee(tail != NULL && list != NULL, "invariant");
1252 }
1253
  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock.
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeCount = 0;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      ReadStableMark(object);
      continue;
    }
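
    // A hedged sketch of what the polling in ReadStableMark() (defined
    // earlier in this file) amounts to; the real routine layers bounded
    // spinning, yielding, and parking heuristics on top of this loop:
    //
    //   while (object->mark() == markOopDesc::INFLATING()) {
    //     SpinPause();        // brief busy-wait first
    //     os::naked_yield();  // then progressively back off
    //   }
    //
    // SpinPause() and os::naked_yield() are existing HotSpot primitives;
    // their use above is illustrative, not the actual backoff policy.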

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().
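
    // Hedged sketch of the omAlloc() fast path being relied on here (omAlloc
    // is defined earlier in this file; counters and verification elided):
    //
    //   ObjectMonitor* m = Self->omFreeList;
    //   if (m != NULL) {
    //     Self->omFreeList = m->FreeNext;  // pop from the thread-local free list
    //     Self->omFreeCount--;
    //     return m;
    //   }
    //   // Otherwise reprovision a batch from gFreeList under gListLock,
    //   // or allocate a fresh padded block of _BLOCKSIZE monitors.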

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
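      //
      // Hedged sketch of the owner-side unlock that the 0 stalls (compare
      // ObjectSynchronizer::fast_exit() earlier in this file; simplified):
      //
      //   markOop dhw = lock->displaced_header();
      //   if (object->cas_set_mark(dhw, (markOop)lock) != (markOop)lock) {
      //     // CAS failed: the mark is INFLATING (0) or already a monitor.
      //     // The owner falls into the inflate()/exit slow path, which
      //     // cannot complete until the 0 disappears.
      //   }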


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
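    //
    // A hedged sketch of such a hypothetical inflateTry() (names and
    // structure are illustrative only -- nothing below exists in HotSpot):
    //
    //   ObjectMonitor* m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);
    //   m->set_owner(Self);        // pre-locked before publication
    //   m->set_object(object);
    //   if (object->cas_set_mark(markOopDesc::encode(m), mark) == mark) {
    //     return m;                // inflated *and* owned in a single CAS
    //   }
    //   // else: scrub m, omRelease() it, and fall back to the 2-step path.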

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// We create a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see omFlush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved.  Collectively they traverse
// the population of in-use monitors, deflating where possible.  The scavenged
// monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.  Having a large
// number of monitors in-use could negatively impact performance.  We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
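
// Hedged sketch of the safepoint-time call protocol for the functions below
// (compare ParallelSPCleanupTask and SafepointSynchronize::do_cleanup_tasks()
// in safepoint.cpp; simplified, not the literal cleanup code):
//
//   DeflateMonitorCounters counters;
//   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
//   // For each JavaThread, possibly partitioned across worker threads:
//   //   ObjectSynchronizer::deflate_thread_local_monitors(thread, &counters);
//   ObjectSynchronizer::deflate_idle_monitors(&counters);  // global in-use list
//   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);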

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list -
    // plain old deflation ...
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by
    // freeHeadp and freeTailp.
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, thus
// avoiding the conflicts that would arise if worker threads processed
// the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;            // currently associated with objects
  counters->nInCirculation = 0;    // extant
  counters->nScavenged = 0;        // reclaimed
  counters->perThreadTimes = 0.0;  // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the vm thread is acquiring a lock during a safepoint.
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "scavenge - return");

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList
  if (gOmInUseList) {
    counters->nInCirculation += gOmInUseCount;
    int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
    gOmInUseCount -= deflated_count;
    counters->nScavenged += deflated_count;
    counters->nInuse += gOmInUseCount;
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  if (log_is_enabled(Info, safepoint, cleanup)) {
    // Report the cumulative time for deflating each thread's idle
    // monitors. Note: if the work is split among more than one
    // worker thread, then the reported time will likely be more
    // than a beginning to end measurement of the phase.
    log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs", counters->perThreadTimes);
  }

  gMonitorFreeCount += counters->nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  ForceMonitorScavenge = 0;    // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  timer.stop();

  Thread::muxAcquire(&gListLock, "scavenge - return");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;
  // For now, we only care about cumulative per-thread deflation time.
  counters->perThreadTimes += timer.seconds();

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
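//
// A hedged, slightly modernized form of that idiom (illustrative only:
// _safepoint_counter is private to SafepointSynchronize, and no such
// guard exists in this file):
//
//   intptr_t tmp = SafepointSynchronize::_safepoint_counter;
//   // <code that must not run at a safepoint>
//   guarantee(((tmp ^ SafepointSynchronize::_safepoint_counter) | (tmp & 1)) == 0,
//             "must not pass a safepoint");
//
// The (tmp & 1) term works because the counter is odd while a safepoint
// is in progress and even otherwise.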

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif