/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
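//
//   For reference, _LockWord is a SplitWord -- a full word with a separately
//   addressable low-order byte.  A sketch of the declaration (see mutex.hpp
//   for the authoritative definition; exact qualifiers may differ):
//
//     union SplitWord {
//       volatile intptr_t FullWord;
//       volatile void * Address;
//       volatile jbyte Bytes[sizeof(intptr_t)];
//     };
//
//   _LockWord.Bytes[_LSBINDEX] aliases the LockByte, so the LockByte can be
//   read and stored independently of CAS operations on the full word.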
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   We use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign.)
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (Direct granting is also referred to as "handoff" succession.)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList; see the hypothetical sketch below.
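//
//   A hypothetical sketch of that remedy -- AcquireCounter is not an actual
//   field and "K" is an arbitrary budget; illustrative only:
//
//     void Monitor::unlock_fairly() {    // hypothetical
//       if (--_AcquireCounter > 0) {
//         IUnlock(false);                // usual competitive handoff
//       } else {
//         _AcquireCounter = K;           // reset the abdication budget
//         // abdicate: pass the lock directly to the EntryList head and
//         // requeue this thread at the tail of the EntryList.
//       }
//     }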
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint, as sketched below.
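//
//   In code terms the push mirrors the enqueue arm of AcquireOrPush() below:
//
//     for (;;) {
//       intptr_t v = _LockWord.FullWord;
//       ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);     // anticipate success
//       if (Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v) == v) break;
//       // interference - LockWord changed - just retry
//     }
//
//   A pusher never dereferences the old head before its CAS succeeds, so an
//   ABA transition of the head value between the fetch and the CAS is harmless.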
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck; a sketch follows this list.
//      Note that there's never any sense of contention on the inner lock, however.
//      Threads never contend or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp.)
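//
//   In code terms (cf. IUnlock() below) the inner-lock protocol is roughly:
//
//     if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) return;
//     // ... detach the cxq RATs, select a successor w from the EntryList ...
//     OrderAccess::release_store(&_OnDeck, w);   // pass OnDeck to w
//     w->unpark();
//
//   The successor w, once it finally acquires the outer lock, simply stores
//   NULL into _OnDeck to "drop" the inner lock.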
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock.)
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as, or stronger than, the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack before it
//   enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack; a hypothetical
//   sketch follows.
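//
//   A hypothetical sketch of that scheme -- neither _event_stack nor the
//   helpers below exist today; FreeNext is the ParkEvent link field used by
//   the global free list:
//
//     ParkEvent * Thread::borrow_event() {          // hypothetical
//       ParkEvent * e = _event_stack;               // pop from the local cache
//       if (e != NULL) { _event_stack = e->FreeNext; return e; }
//       return ParkEvent::Allocate(this);           // fall back to the global pool
//     }
//
//     void Thread::recycle_event(ParkEvent * e) {   // hypothetical
//       e->FreeNext = _event_stack;                 // push back onto the cache
//       _event_stack = e;
//     }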
//
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

#define UNS(x) (uintptr_t(x))
#define TRACE(m)                   \
  {                                \
    static volatile int ctr = 0;   \
    int x = ++ctr;                 \
    if ((x & (x - 1)) == 0) {      \
      ::printf("%d:%s\n", x, #m);  \
      ::fflush(stdout);            \
    }                              \
  }
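
// TRACE(m) fires only when the call count reaches a power of two -- the
// (x & (x - 1)) == 0 test -- so, for example, TRACE(Monitor::IUnlock) would
// print "1:Monitor::IUnlock", "2:Monitor::IUnlock", "4:...", "8:..." and so
// on, giving exponentially thinned diagnostic output on hot paths.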

const intptr_t _LBIT = 1;

// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
#ifdef VM_LITTLE_ENDIAN
#define _LSBINDEX 0
#else
#define _LSBINDEX (sizeof(intptr_t)-1)
#endif

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = Atomic::cmpxchg(_LBIT, &_LockWord.FullWord, (intptr_t)0);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock()) return 1;
  if (!os::is_MP()) return 0;

  int Probes = 0;
  int Delay = 0;
  int SpinMax = 20;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v) == v) {
        return 1;
      }
      continue;
    }

    SpinPause();

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
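      // Delay thus follows 1, 3, 7, 15, ... and saturates at 0x7FF (2047) iterations.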
    }

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if (SafepointMechanism::poll(Self)) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = Atomic::cmpxchg(v|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 1;        // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = Atomic::cmpxchg(intptr_t(ESelf)|_LBIT, &_LockWord.FullWord, v);
      if (u == v) return 0;        // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock(Thread * Self) {
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    return;
  }

  ParkEvent * const ESelf = Self->_MutexEvent;
  assert(_OnDeck != ESelf, "invariant");

  // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT.
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin(Self)) goto Exeunt;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset();
  OrderAccess::fence();

  if (AcquireOrPush(ESelf)) goto Exeunt;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
    ParkCommon(ESelf, 0);
  }

  // Self is now in the OnDeck position and will remain so until it
  // manages to acquire the lock.
  for (;;) {
    assert(_OnDeck == ESelf, "invariant");
    if (TrySpin(Self)) break;
    // It's probably wise to spin only if we *actually* blocked.
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilogue immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next OnDeck thread from the EntryList.
  //    If successful, set OnDeck to refer to that thread, otherwise clear OnDeck.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt;
}

void Monitor::IUnlock(bool RelaxAssert) {
  assert(ILocked(), "invariant");
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], jbyte(0)); // drop outer lock

  OrderAccess::storeload();
  ParkEvent * const w = _OnDeck; // raw load as we will just return if non-NULL
  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate between the two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the OnDeck thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark();
    return;
  }

  intptr_t cxq = _LockWord.FullWord;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
    return;
  }

  ParkEvent * List = _EntryList;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert(List == _EntryList, "invariant");
    ParkEvent * const w = List;
    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
    _EntryList = w->ListNext;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert(intptr_t(_OnDeck) == _LBIT, "invariant");

    // Pass OnDeck role to w, ensuring that _EntryList has been set first.
    // w will clear _OnDeck once it acquires the outer lock.
    // Note that once we set _OnDeck that thread can acquire the mutex, proceed
    // with its critical section and then enter this code to unlock the mutex.  So
    // you can have multiple threads active in IUnlock at the same time.
    OrderAccess::release_store(&_OnDeck, w);

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload();
    cxq = _LockWord.FullWord;
    if (cxq & _LBIT) return;

    w->unpark();
    return;
  }

  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt;
      const intptr_t vfy = Atomic::cmpxchg(cxq & _LBIT, &_LockWord.FullWord, cxq);
      if (vfy == cxq) break;
      cxq = vfy;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert(_EntryList == NULL, "invariant");
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
    assert(List != NULL, "invariant");
    goto WakeOne;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert(intptr_t(_OnDeck) == _LBIT, "invariant");
  _OnDeck = NULL;            // Release inner lock.
  OrderAccess::storeload();  // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession;  // potential race -- re-run succession
  }
  return;
}

bool Monitor::notify() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  if (_WaitSet == NULL) return true;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the lock held by the notifier.
  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
  ParkEvent * nfy = _WaitSet;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext;
    assert(nfy->Notified == 0, "invariant");
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord;
      assert((v & 0xFF) == _LBIT, "invariant");
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (Atomic::cmpxchg(intptr_t(nfy)|_LBIT, &_LockWord.FullWord, v) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence();
    nfy->Notified = 1;
  }
  Thread::muxRelease(_WaitLock);
  assert(ILocked(), "invariant");
  return true;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert(_owner == Thread::current(), "invariant");
  assert(ILocked(), "invariant");
  while (_WaitSet != NULL) notify();
  return true;
}

int Monitor::IWait(Thread * Self, jlong timo) {
  assert(ILocked(), "invariant");

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent;
  ESelf->Notified = 0;
  ESelf->reset();
  OrderAccess::fence();

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  // A hypothetical sketch of this variant appears below.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.
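  //
  // A sketch of that WaitEvent variant -- entirely hypothetical, as no such
  // field or helpers exist today:
  //
  //   int Monitor::IWait(Thread * Self, jlong timo) {    // hypothetical shape
  //     ParkEvent * const WSelf = Self->_WaitEvent;      // distinct from _MutexEvent
  //     WSelf->ListNext = _WaitSet;                      // outer lock protects WaitSet
  //     _WaitSet = WSelf;
  //     IUnlock(true);                                   // release the outer lock
  //     ParkCommon(WSelf, timo);                         // notification or timeout
  //     ILock(Self);                                     // reacquire the outer lock
  //     if (!WSelf->Notified) UnlinkFromWaitSet(WSelf);  // hypothetical helper
  //     ...
  //   }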

  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
  ESelf->ListNext = _WaitSet;
  _WaitSet = ESelf;
  Thread::muxRelease(_WaitLock);

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock(true);

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break;
    int err = ParkCommon(ESelf, timo);
    if (err == OS_TIMEOUT) break;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence();
  int WasOnWaitSet = 0;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
    if (ESelf->Notified == 0) {     // DCL idiom
      assert(_OnDeck != ESelf, "invariant");   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet;
      ParkEvent * q = NULL;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p;
        p = p->ListNext;
      }
      assert(p == ESelf, "invariant");
      if (p == _WaitSet) {             // found at head
        assert(q == NULL, "invariant");
        _WaitSet = p->ListNext;
      } else {                         // found in interior
        assert(q->ListNext == p, "invariant");
        q->ListNext = p->ListNext;
      }
      WasOnWaitSet = 1;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease(_WaitLock);
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck.
    assert(_OnDeck != ESelf, "invariant");
    ILock(Self);
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(Self)) break;
      ParkCommon(ESelf, 0);
    }
    assert(_OnDeck == ESelf, "invariant");
    _OnDeck = NULL;
  }

  assert(ILocked(), "invariant");
  return WasOnWaitSet != 0;    // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that had already been physically locked by mutators
// but where mutators were known blocked by the reentry thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock(Thread * Self) {
  // Ensure that the Monitor requires/allows safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_never,
         "This lock should never have a safepoint check: %s", name());

#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self, StrictSafepointChecks));
  assert(_owner != Self, "invariant");
  assert(_OnDeck != Self->_MutexEvent, "invariant");

  if (TryFast()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(owner() == NULL, "invariant");
    set_owner(Self);
    return;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin(Self)) goto Exeunt;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horrible dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm((JavaThread *) Self);
    ILock(Self);
  } else {
    // Mirabile dictu
    ILock(Self);
  }
  goto Exeunt;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check(Thread * Self) {
  // Ensure that the Monitor does not require or allow safepoint checks.
  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
         "This lock should always have a safepoint check: %s", name());
  assert(_owner != Self, "invariant");
  ILock(Self);
  assert(_owner == NULL, "invariant");
  set_owner(Self);
}

void Monitor::lock_without_safepoint_check() {
  lock_without_safepoint_check(Thread::current());
}


// Returns true if thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self, false));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert(_owner == NULL, "invariant");
    set_owner(Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert(_owner == Thread::current(), "invariant");
  assert(_OnDeck != Thread::current()->_MutexEvent, "invariant");
  set_owner(NULL);
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert(ILocked(), "invariant");
    assert(_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the Thread::current_or_null()
    // might return NULL.  Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = Thread::current_or_null();
    return;
  }

  if (TrySpin(NULL)) goto Exeunt;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
  ESelf->reset();
  OrderAccess::storeload();

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush(ESelf)) {
    ParkEvent::Release(ESelf);      // surrender the ParkEvent
    goto Exeunt;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList.
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (OrderAccess::load_acquire(&_OnDeck) == ESelf && TrySpin(NULL)) break;
    ParkCommon(ESelf, 0);
  }

  assert(_OnDeck == ESelf, "invariant");
  _OnDeck = NULL;
  ParkEvent::Release(ESelf);      // surrender the ParkEvent
  goto Exeunt;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return;
  }
  IUnlock(false);
}

bool Monitor::wait(bool no_safepoint_check, long timeout,
                   bool as_suspend_equivalent) {
  // Make sure safepoint checking is used properly.
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
         "This lock should never have a safepoint check: %s", name());
  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
         "This lock should always have a safepoint check: %s", name());

  Thread * const Self = Thread::current();
  assert(_owner == Self, "invariant");
  assert(ILocked(), "invariant");

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
  // !no_safepoint_check logically implies java_thread
  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    ::tty->print("Attempting to wait on monitor %s/%d while holding"
                 " lock %s/%d -- possible deadlock",
                 name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait(Self, timeout);
  } else {
    assert(Self->is_Java_thread(), "invariant");
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait(Self, timeout);

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert(ILocked(), "invariant");
      IUnlock(true);
      jt->java_suspend_self();
      ILock(Self);
      assert(ILocked(), "invariant");
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert(ILocked(), "invariant");
  assert(_owner == NULL, "invariant");
  set_owner(Self);
  return wait_status != 0;          // return true IFF timeout
}

Monitor::~Monitor() {
#ifdef ASSERT
  uintptr_t owner = UNS(_owner);
  uintptr_t lockword = UNS(_LockWord.FullWord);
  uintptr_t entrylist = UNS(_EntryList);
  uintptr_t waitset = UNS(_WaitSet);
  uintptr_t ondeck = UNS(_OnDeck);
  // Print _name with precision limit, in case failure is due to memory
  // corruption that also trashed _name.
  assert((owner|lockword|entrylist|waitset|ondeck) == 0,
         "%.*s: _owner(" INTPTR_FORMAT ")|_LockWord(" INTPTR_FORMAT ")|_EntryList(" INTPTR_FORMAT ")|_WaitSet("
         INTPTR_FORMAT ")|_OnDeck(" INTPTR_FORMAT ") != 0",
         MONITOR_NAME_LEN, _name, owner, lockword, entrylist, waitset, ondeck);
#endif
}

void Monitor::ClearMonitor(Monitor * m, const char *name) {
  m->_owner = NULL;
  m->_snuck = false;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN");
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0;
  m->_EntryList = NULL;
  m->_OnDeck = NULL;
  m->_WaitSet = NULL;
  m->_WaitLock[0] = 0;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
                 SafepointCheckRequired safepoint_check_required) {
  ClearMonitor(this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
             SafepointCheckRequired safepoint_check_required) {
  ClearMonitor((Monitor *) this, name);
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank;
  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, p2i(this));
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, p2i(_owner));
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT,
               p2i(this), _LockWord.FullWord, _name, p2i(_owner));
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock) {
      return true;
    }
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
        SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal("acquiring lock %s/%d out of order with lock %s/%d -- "
            "possible deadlock", this->name(), this->rank(),
            locks->name(), locks->rank());
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread, bool safepoint_check) {
  if (safepoint_check) {
    assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
           || rank() == Mutex::special, "wrong thread state for using locks");
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal("VM thread using lock %s (not allowed to block on)", name());
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
         "locking not allowed when crash protection is set");
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT