/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"

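
// Note: the biased-locking state lives in the object's mark word. As described
// in markWord.hpp, a biased or biasable mark word has the layout
//   [JavaThread* | epoch:2 | age:4 | biased_lock:1 | lock:2]
// with the biased_lock bit set and the lock bits 01. The has_bias_pattern(),
// bias_epoch() and biased_locker() queries used throughout this file test or
// decode those fields.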
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*   _preserved_oop_stack  = NULL;
static GrowableArray<markWord>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markWord::biased_locking_prototype());
}

static void enable_biased_locking() {
  _biased_locking_enabled = true;
  log_info(biasedlocking)("Biased locking enabled");
}

class VM_EnableBiasedLocking: public VM_Operation {
 public:
  VM_EnableBiasedLocking() {}
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    enable_biased_locking();
  }

  bool allow_nested_vm_operations() const        { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    VM_EnableBiasedLocking op;
    VMThread::execute(&op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled and BiasedLockingStartupDelay is set,
  // schedule a task to fire after the specified delay which turns on
  // biased locking for all currently loaded classes as well as future
  // ones. This could be a workaround for startup time regressions
  // due to a large number of safepoints being taken during VM startup for
  // bias revocation.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      enable_biased_locking();
    }
  }
}
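
// For example, with -XX:+UseBiasedLocking -XX:BiasedLockingStartupDelay=4000
// the one-shot task above fires roughly four seconds after startup, while a
// delay of 0 enables biasing for all classes immediately at init() time.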


bool BiasedLocking::enabled() {
  assert(UseBiasedLocking, "precondition");
  // We check "BiasedLockingStartupDelay == 0" here to cover the
  // possibility of calls to BiasedLocking::enabled() before
  // BiasedLocking::init().
  return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
}


// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
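
// Note that the per-thread cache filled in above stays valid only until the
// thread's stack changes; revocation paths discard it by calling
// set_cached_monitor_info(NULL), typically via clean_up_cached_monitor_info()
// further below, once they are done with a revocation pass.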


// After the call, *biased_locker is set to obj->mark().biased_locker() if
// biased_locker != NULL and that thread is still alive. Otherwise it is left
// unchanged (i.e. the caller is responsible for initializing it).
void BiasedLocking::single_revoke_at_safepoint(oop obj, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  markWord mark = obj->mark();
  if (!mark.has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), mark.value(),
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return;
  }

  uint age = mark.age();
  markWord unbiased_prototype = markWord::prototype().set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            mark.value(),
                            obj->klass()->external_name(),
                            obj->klass()->prototype_header().value(),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             mark.value(),
                             obj->klass()->external_name(),
                             obj->klass()->prototype_header().value(),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark.biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    obj->set_mark(unbiased_prototype);

    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    obj->set_mark(unbiased_prototype);
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
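  // For example, if the biased thread holds obj recursively three times, the
  // loop below gives each matching BasicLock a NULL displaced header (the
  // marker for a recursive stack lock); highest_lock ends up pointing at the
  // oldest frame's lock, which is then given the unbiased header, and obj's
  // mark word is redirected to that BasicLock.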
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o) {
  markWord mark = o->mark();
  if (!mark.has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = nanos_to_millis(os::javaTimeNanos());
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
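
// Worked example of the heuristics above, assuming the default flag values
// (BiasedLockingBulkRebiasThreshold = 20, BiasedLockingBulkRevokeThreshold = 40,
// BiasedLockingDecayTime = 25000 ms): revocations 1-19 for a class return
// HR_SINGLE_REVOKE; the 20th returns HR_BULK_REBIAS; revocations 21-39 are
// single again; and the 40th returns HR_BULK_REVOKE, after which biasing is
// disabled for the class. If at least 25 seconds elapse between the bulk
// rebias and a later revocation, the count is instead reset to 0.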


void BiasedLocking::bulk_revoke_at_safepoint(oop o, bool bulk_rebias, JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          o->mark().value(),
                          o->klass()->external_name());

  jlong cur_time = nanos_to_millis(os::javaTimeNanos());
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header().has_bias_pattern()) {
        int prev_epoch = klass->prototype_header().bias_epoch();
        klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
        int cur_epoch = klass->prototype_header().bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markWord mark = owner->mark();
            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark.set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markWord::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markWord mark = owner->mark();
          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
            single_revoke_at_safepoint(owner, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be forcibly revoked as well,
      // to ensure guarantees to callers
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  assert(!o->mark().has_bias_pattern(), "bug in bulk bias revocation");
}


static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
  if (thread != NULL) {
    thread->set_cached_monitor_info(NULL);
  } else {
    // Walk the thread list clearing out the cached monitors
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
      thr->set_cached_monitor_info(NULL);
    }
  }
}


class VM_BulkRevokeBias : public VM_Operation {
private:
  Handle* _obj;
  JavaThread* _requesting_thread;
  bool _bulk_rebias;
  uint64_t _safepoint_id;

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }

  virtual void doit() {
    BiasedLocking::bulk_revoke_at_safepoint((*_obj)(), _bulk_rebias, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};
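

// Single-bias revocation without a global safepoint: the requesting thread
// asks the bias holder to run do_thread() below via a direct handshake (see
// Handshake::execute in single_revoke_with_handshake). Only the holder is
// stopped while its stack is walked, rather than bringing all threads to a
// safepoint.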
class RevokeOneBias : public HandshakeClosure {
protected:
  Handle _obj;
  JavaThread* _requesting_thread;
  JavaThread* _biased_locker;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;
  bool _executed;

public:
  RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
    : HandshakeClosure("RevokeOneBias")
    , _obj(obj)
    , _requesting_thread(requesting_thread)
    , _biased_locker(biased_locker)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0)
    , _executed(false) {}

  bool executed() { return _executed; }

  void do_thread(Thread* target) {
    assert(target == _biased_locker, "Wrong thread");
    _executed = true;

    oop o = _obj();
    markWord mark = o->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    markWord prototype = o->klass()->prototype_header();
    if (!prototype.has_bias_pattern()) {
      // This object has a stale bias from before the handshake
      // was requested. If we fail this race, the object's bias
      // has been revoked by another thread so we simply return.
      markWord biased_value = mark;
      mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
      assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
      if (biased_value == mark) {
        _status_code = BiasedLocking::BIAS_REVOKED;
      }
      return;
    }

    if (_biased_locker == mark.biased_locker()) {
      if (mark.bias_epoch() == prototype.bias_epoch()) {
        // Epoch is still valid. This means biaser could be currently
        // synchronized on this object. We must walk its stack looking
        // for monitor records associated with this object and change
        // them to be stack locks if any are found.
        ResourceMark rm;
        BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
        _biased_locker->set_cached_monitor_info(NULL);
        assert(!o->mark().has_bias_pattern(), "invariant");
        _biased_locker_id = JFR_THREAD_ID(_biased_locker);
        _status_code = BiasedLocking::BIAS_REVOKED;
        return;
      } else {
        markWord biased_value = mark;
        mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (mark == biased_value || !mark.has_bias_pattern()) {
          assert(!o->mark().has_bias_pattern(), "should be revoked");
          _status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
          return;
        }
      }
    }

    _status_code = BiasedLocking::NOT_REVOKED;
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->commit();
}

static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->set_safepointId(0);
  event->set_previousOwner(op->biased_locker());
  event->commit();
}

static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_revokedClass(k);
  event->set_disableBiasing(!op->is_bulk_rebias());
  event->set_safepointId(op->safepoint_id());
  event->commit();
}


BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) {

  EventBiasedLockRevocation event;
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(handshakes_count_addr());
  }
  log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread "
                                     INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester),
                                     p2i(biaser), p2i(obj()));

  RevokeOneBias revoke(obj, requester, biaser);
  Handshake::execute(&revoke, biaser);
  if (revoke.status_code() == NOT_REVOKED) {
    return NOT_REVOKED;
  }
  if (revoke.executed()) {
    log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked",
                                       p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already "));
    if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
      post_revocation_event(&event, obj->klass(), &revoke);
    }
    assert(!obj->mark().has_bias_pattern(), "invariant");
    return revoke.status_code();
  } else {
    // Thread was not alive.
    // Grab Threads_lock before manually trying to revoke bias. This avoids race with a newly
    // created JavaThread (that happens to get the same memory address as biaser) synchronizing
    // on this object.
    {
      MutexLocker ml(Threads_lock);
      markWord mark = obj->mark();
      // Check if somebody else was able to revoke it before biased thread exited.
      if (!mark.has_bias_pattern()) {
        return NOT_BIASED;
      }
      ThreadsListHandle tlh;
      markWord prototype = obj->klass()->prototype_header();
      if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
                                            prototype.bias_epoch() == mark.bias_epoch())) {
        obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (event.should_commit()) {
          post_revocation_event(&event, obj->klass(), &revoke);
        }
        assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
        return BIAS_REVOKED;
      }
    }
  }

  return NOT_REVOKED;
}


// Caller should have instantiated a ResourceMark object before calling this method
void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) {
  Thread* cur = Thread::current();
  assert(!SafepointSynchronize::is_at_safepoint(), "this should always be executed outside safepoints");
  assert(biased_locker->is_handshake_safe_for(cur), "wrong thread");

  markWord mark = obj->mark();
  assert(mark.biased_locker() == biased_locker &&
         obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");

  log_trace(biasedlocking)("JavaThread(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
                           INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                           ", biaser " INTPTR_FORMAT " %s",
                           p2i(cur),
                           p2i(obj),
                           mark.value(),
                           obj->klass()->external_name(),
                           obj->klass()->prototype_header().value(),
                           p2i(biased_locker),
                           cur != biased_locker ? "" : "(walking own stack)");

  markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());

  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    log_info(biasedlocking)("  Revoked bias of currently-locked object");
  } else {
    log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  assert(!obj->mark().has_bias_pattern(), "must not be biased");
}

void BiasedLocking::revoke_own_lock(Handle obj, TRAPS) {
  JavaThread* thread = THREAD->as_Java_thread();

  markWord mark = obj->mark();

  if (!mark.has_bias_pattern()) {
    return;
  }

  Klass *k = obj->klass();
  assert(mark.biased_locker() == thread &&
         k->prototype_header().bias_epoch() == mark.bias_epoch(), "Revoke failed, unhandled biased lock state");
  ResourceMark rm;
  log_info(biasedlocking)("Revoking bias by walking my own stack:");
  EventBiasedLockSelfRevocation event;
  BiasedLocking::walk_stack_and_revoke(obj(), thread);
  thread->set_cached_monitor_info(NULL);
  assert(!obj->mark().has_bias_pattern(), "invariant");
  if (event.should_commit()) {
    post_self_revocation_event(&event, k);
  }
}

void BiasedLocking::revoke(Handle obj, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  while (true) {
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markWord mark = obj->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    if (mark.is_biased_anonymously()) {
      // We are probably trying to revoke the bias of this object due to
      // an identity hash code computation. Try to revoke the bias
      // without a safepoint. This is possible if we can successfully
      // compare-and-exchange an unbiased header into the mark word of
      // the object, meaning that no other thread has raced to acquire
      // the bias of the object.
      markWord biased_value       = mark;
      markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
      markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
      if (res_mark == biased_value) {
        return;
      }
      mark = res_mark;  // Refresh mark with the latest value.
    } else {
      Klass* k = obj->klass();
      markWord prototype_header = k->prototype_header();
      if (!prototype_header.has_bias_pattern()) {
        // This object has a stale bias from before the bulk revocation
        // for this data type occurred. It's pointless to update the
        // heuristics at this point so simply update the header with a
        // CAS. If we fail this race, the object's bias has been revoked
        // by another thread so we simply return and let the caller deal
        // with it.
        obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
        assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
        return;
      } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
        // The epoch of this biasing has expired indicating that the
        // object is effectively unbiased. We can revoke the bias of this
        // object efficiently enough with a CAS that we shouldn't update the
        // heuristics. This is normally done in the assembly code but we
        // can reach this point due to various points in the runtime
        // needing to revoke biases.
        markWord res_mark;
        markWord biased_value       = mark;
        markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
        res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return;
        }
        mark = res_mark;  // Refresh mark with the latest value.
      }
    }

    HeuristicsResult heuristics = update_heuristics(obj());
    if (heuristics == HR_NOT_BIASED) {
      return;
    } else if (heuristics == HR_SINGLE_REVOKE) {
      JavaThread *blt = mark.biased_locker();
      assert(blt != NULL, "invariant");
      if (blt == THREAD) {
        // A thread is trying to revoke the bias of an object biased
        // toward it, again likely due to an identity hash code
        // computation. We can again avoid a safepoint/handshake in this case
        // since we are only going to walk our own stack. There are no
        // races with revocations occurring in other threads because we
        // reach no safepoints in the revocation path.
        EventBiasedLockSelfRevocation event;
        ResourceMark rm;
        walk_stack_and_revoke(obj(), blt);
        blt->set_cached_monitor_info(NULL);
        assert(!obj->mark().has_bias_pattern(), "invariant");
        if (event.should_commit()) {
          post_self_revocation_event(&event, obj->klass());
        }
        return;
      } else {
        BiasedLocking::Condition cond = single_revoke_with_handshake(obj, THREAD->as_Java_thread(), blt);
        if (cond != NOT_REVOKED) {
          return;
        }
      }
    } else {
      assert((heuristics == HR_BULK_REVOKE) ||
             (heuristics == HR_BULK_REBIAS), "?");
      EventBiasedLockClassRevocation event;
      VM_BulkRevokeBias bulk_revoke(&obj, THREAD->as_Java_thread(),
                                    (heuristics == HR_BULK_REBIAS));
      VMThread::execute(&bulk_revoke);
      if (event.should_commit()) {
        post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
      }
      return;
    }
  }
}

// All objects in objs should be locked by biaser
void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
  bool clean_my_cache = false;
  for (int i = 0; i < objs->length(); i++) {
    oop obj = (objs->at(i))();
    markWord mark = obj->mark();
    if (mark.has_bias_pattern()) {
      walk_stack_and_revoke(obj, biaser);
      clean_my_cache = true;
    }
  }
  if (clean_my_cache) {
    clean_up_cached_monitor_info(biaser);
  }
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj);
  if (heuristics == HR_SINGLE_REVOKE) {
    JavaThread* biased_locker = NULL;
    single_revoke_at_safepoint(obj, false, NULL, &biased_locker);
    if (biased_locker) {
      clean_up_cached_monitor_info(biased_locker);
    }
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
    clean_up_cached_monitor_info();
  }
}
preserve_marks()892 void BiasedLocking::preserve_marks() {
893   if (!UseBiasedLocking)
894     return;
895 
896   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
897 
898   assert(_preserved_oop_stack  == NULL, "double initialization");
899   assert(_preserved_mark_stack == NULL, "double initialization");
900 
901   // In order to reduce the number of mark words preserved during GC
902   // due to the presence of biased locking, we reinitialize most mark
903   // words to the class's prototype during GC -- even those which have
904   // a currently valid bias owner. One important situation where we
905   // must not clobber a bias is when a biased object is currently
906   // locked. To handle this case we iterate over the currently-locked
907   // monitors in a prepass and, if they are biased, preserve their
908   // mark words here. This should be a relatively small set of objects
909   // especially compared to the number of objects in the heap.
910   _preserved_mark_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<markWord>(10, mtGC);
911   _preserved_oop_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<Handle>(10, mtGC);
912 
913   Thread* cur = Thread::current();
914   ResourceMark rm(cur);
915 
916   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
917     if (thread->has_last_Java_frame()) {
918       RegisterMap rm(thread);
919       for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
920         GrowableArray<MonitorInfo*> *monitors = vf->monitors();
921         if (monitors != NULL) {
922           int len = monitors->length();
923           // Walk monitors youngest to oldest
924           for (int i = len - 1; i >= 0; i--) {
925             MonitorInfo* mon_info = monitors->at(i);
926             if (mon_info->owner_is_scalar_replaced()) continue;
927             oop owner = mon_info->owner();
928             if (owner != NULL) {
929               markWord mark = owner->mark();
930               if (mark.has_bias_pattern()) {
931                 _preserved_oop_stack->push(Handle(cur, owner));
932                 _preserved_mark_stack->push(mark);
933               }
934             }
935           }
936         }
937       }
938     }
939   }
940 }
941 
942 
restore_marks()943 void BiasedLocking::restore_marks() {
944   if (!UseBiasedLocking)
945     return;
946 
947   assert(_preserved_oop_stack  != NULL, "double free");
948   assert(_preserved_mark_stack != NULL, "double free");
949 
950   int len = _preserved_oop_stack->length();
951   for (int i = 0; i < len; i++) {
952     Handle owner = _preserved_oop_stack->at(i);
953     markWord mark = _preserved_mark_stack->at(i);
954     owner->set_mark(mark);
955   }
956 
957   delete _preserved_oop_stack;
958   _preserved_oop_stack = NULL;
959   delete _preserved_mark_stack;
960   _preserved_mark_stack = NULL;
961 }
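
// preserve_marks()/restore_marks() bracket a collection: as the comment in
// preserve_marks() notes, GC reinitializes most mark words to the class's
// prototype, so the biased headers of currently-locked objects recorded there
// are written back verbatim once the collection has finished.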


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() const {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) const {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# handshakes entries: %d", _handshakes_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}

void BiasedLockingCounters::print() const { print_on(tty); }