/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero. Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq. Colocating the LockByte with the cxq precludes certain races.
//
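//   As an illustration only -- not part of this implementation -- the word
//   layout just described can be sketched as follows. The helper names here
//   are hypothetical; the real encoding lives in the SplitWord union and the
//   _LBIT constant in mutex.hpp:
//
//     // LockWord = [ cxq head pointer (upper bits) | LockByte (low byte) ]
//     static inline bool LockWordIsLocked (intptr_t w) {
//       return (w & 1) != 0 ;                        // low bit of the LockByte
//     }
//     static inline ParkEvent * LockWordCxqHead (intptr_t w) {
//       return (ParkEvent *) (w & ~(intptr_t)1) ;    // strip the lock bit
//     }
//
//   Because the lock bit and the cxq head share a single word, pushing onto
//   the cxq and checking/setting the lock are one CAS on that word.
//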
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms. We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS. If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path. (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor. The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread. (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness. If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor. After a thread acquires
//   the lock it will decrement the AcquireCounter field. When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList. A sketch of this remedy appears below.
//
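//   A minimal sketch of that fairness remedy, assuming a hypothetical
//   _AcquireCounter field, FairnessInterval constant, and abdication helper
//   (none of which exist in this file):
//
//     void Monitor::unlock_with_fairness_bound() {   // hypothetical
//       if (--_AcquireCounter > 0) {
//         IUnlock (false) ;                // usual competitive handoff
//         return ;
//       }
//       _AcquireCounter = FairnessInterval ;  // reset the bound
//       // Abdicate: hand the lock directly to the EntryList head and
//       // requeue ourselves at the tail, trading throughput for fairness.
//       AbdicateToEntryListHead() ;        // hypothetical helper
//     }
//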
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern. Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread. This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list. If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely. Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant. The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null. If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList. By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread. Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck. Beware
//      that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck. Note that there's never
//      any sense of contention on the inner lock, however. Threads never contend
//      or wait for the inner lock. The protocol is condensed in the sketch below.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
//
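//   The asymmetric inner-lock protocol can be condensed as follows. Field
//   names match this file, but the control flow is simplified and the
//   PickAndUnlinkSuccessor() helper is hypothetical -- this is a sketch,
//   not a drop-in replacement for IUnlock():
//
//     // Exiting thread: "trylock" OnDeck via CAS; never block on it.
//     if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) return ;   // busy
//     ParkEvent * w = PickAndUnlinkSuccessor() ;  // drains cxq -> EntryList
//     _OnDeck = w ;       // pass the inner lock to the successor ...
//     w->unpark() ;       // ... and wake it
//
//     // Successor, in ILock(), after finally winning the outer lock:
//     _OnDeck = NULL ;    // "drop" the inner lock it never explicitly CASed
//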
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet. Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse. (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes. The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction. See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark. Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark. The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand. The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release(). A thread would simply pop an element from the local stack
//   before it enqueued or park()ed. When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack. A sketch follows.
//
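//   A sketch of that proposed per-thread cache, assuming a hypothetical
//   Thread::_event_stack field threaded through ParkEvent::ListNext (nothing
//   below exists in Thread:: today):
//
//     ParkEvent * Thread::borrow_event() {         // hypothetical
//       ParkEvent * e = _event_stack ;             // pop from the local cache
//       if (e != NULL) { _event_stack = e->ListNext ; return e ; }
//       return ParkEvent::Allocate (this) ;        // cache miss - go global
//     }
//     void Thread::recycle_event (ParkEvent * e) { // hypothetical
//       e->ListNext = _event_stack ;               // push back for reuse
//       _event_stack = e ;
//     }
//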
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes. That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor. While distasteful, this is largely benign
//   as the calls come from a jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way. Melding
//   them together was facile -- a bit too facile. The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o


// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
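
// For example, CASPTR(&_LockWord, 0, _LBIT) attempts to replace a value of 0
// in _LockWord with _LBIT and returns the value observed before the exchange,
// so a return equal to the compare operand means the CAS succeeded. As an
// illustration only, the operation is equivalent to this atomic pseudo-code:
//
//   intptr_t cas_ptr (volatile intptr_t * addr, intptr_t cmp, intptr_t set) {
//     intptr_t old = *addr ;           // performed atomically:
//     if (old == cmp) *addr = set ;    // store only if the word is unchanged
//     return old ;                     // caller checks old == cmp
//   }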

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless. (At worst it'll slightly retard
// acquisition times). The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock()) return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes = 0 ;
  int Delay = 0 ;
  int Steps = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions. The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(ESelf)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before_ having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock. Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible. That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock. As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock. In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not_ related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession. The LSBit of
    // OnDeck allows us to discriminate two cases. If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive. This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation. Critically, if "w" _is_ stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock. In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it. Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck. That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
    // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable. In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable. For instance, let's say the cxq is "ABCD"
      // when we first fetch cxq above. Between the fetch -- where we observed "A"
      // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority. See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock. Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {          // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq. This could be done more efficiently with a single bulk en masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters. Let's say that the
// waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before_ it becomes the owner of the lock. We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet. Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class. The WaitSet would be composed of WaitEvents. Only the
  // owner of the outer lock would manipulate the WaitSet. A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock. More precisely,
  // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
  // and then finds *itself* on the cxq. During the course of a normal
  // IUnlock() call a thread should _never_ find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;        // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {          // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                      // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;            // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout. ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}


// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity. It's possible that the need for sneaking could be obviated
// as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and then
// (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section. The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked by
// mutators, where those mutators are known to be blocked at the reentry thread
// state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking. We'd
// decouple lock acquisition and parking. The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away. Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on. we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}


// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after_ the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex" (sketched below). Alternatively, a simple layer over
// muxAcquire-muxRelease would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available. Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention. That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list. This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test. I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...
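//
// As an aside, the "PlatformMutex" alternative mentioned above could be as
// small as the following sketch (a hypothetical class, not part of HotSpot;
// POSIX-only, with error handling elided):
//
//   class PlatformMutex {
//     pthread_mutex_t _mx ;
//    public:
//     PlatformMutex()  { pthread_mutex_init (&_mx, NULL) ; }
//     ~PlatformMutex() { pthread_mutex_destroy (&_mx) ; }
//     void lock()      { pthread_mutex_lock (&_mx) ; }
//     void unlock()    { pthread_mutex_unlock (&_mx) ; }
//   };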

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;        // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {                       // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;           // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck = NULL ;
  m->_WaitSet = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}


// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}


bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order. For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
          SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}


// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  assert(!os::ThreadCrashProtection::is_crash_protected(thread),
         "locking not allowed when crash protection is set");
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT