/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/threadService.hpp"

// TODO: we need to define a naming convention for perf counters
// to distinguish counters for:
//   - standard JSR174 use
//   - Hotspot extension (public and committed)
//   - Hotspot extension (private/internal and uncommitted)

// Default is disabled.
bool ThreadService::_thread_monitoring_contention_enabled = false;
bool ThreadService::_thread_cpu_time_enabled = false;
bool ThreadService::_thread_allocated_memory_enabled = false;

PerfCounter*  ThreadService::_total_threads_count = NULL;
PerfVariable* ThreadService::_live_threads_count = NULL;
PerfVariable* ThreadService::_peak_threads_count = NULL;
PerfVariable* ThreadService::_daemon_threads_count = NULL;
volatile int ThreadService::_atomic_threads_count = 0;
volatile int ThreadService::_atomic_daemon_threads_count = 0;

ThreadDumpResult* ThreadService::_threaddump_list = NULL;

static const int INITIAL_ARRAY_SIZE = 10;

void ThreadService::init() {
  EXCEPTION_MARK;

  // These counters are for java.lang.management API support.
  // They are created even if -XX:-UsePerfData is set and in
  // that case, they will be allocated on C heap.

  _total_threads_count =
                PerfDataManager::create_counter(JAVA_THREADS, "started",
                                                PerfData::U_Events, CHECK);

  _live_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "live",
                                                 PerfData::U_None, CHECK);

  _peak_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "livePeak",
                                                 PerfData::U_None, CHECK);

  _daemon_threads_count =
                PerfDataManager::create_variable(JAVA_THREADS, "daemon",
                                                 PerfData::U_None, CHECK);

  if (os::is_thread_cpu_time_supported()) {
    _thread_cpu_time_enabled = true;
  }

  _thread_allocated_memory_enabled = true; // Always on, so enable it
}

void ThreadService::reset_peak_thread_count() {
  // Acquire the lock to update the peak thread count
  // to synchronize with thread addition and removal.
  MutexLocker mu(Threads_lock);
  _peak_threads_count->set_value(get_live_thread_count());
}

static bool is_hidden_thread(JavaThread *thread) {
  // hide VM internal or JVMTI agent threads
  return thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread();
}

void ThreadService::add_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  _total_threads_count->inc();
  _live_threads_count->inc();
  Atomic::inc(&_atomic_threads_count);
  int count = _atomic_threads_count;

  if (count > _peak_threads_count->get_value()) {
    _peak_threads_count->set_value(count);
  }

  if (daemon) {
    _daemon_threads_count->inc();
    Atomic::inc(&_atomic_daemon_threads_count);
  }
}

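// Decrement the live (and, if applicable, daemon) atomic thread counters.
// Called from remove_thread() and current_thread_exiting(); only the atomic
// counters are updated here, the perf counters are decremented in
// remove_thread() while holding the Threads_lock.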
void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) {
  Atomic::dec(&_atomic_threads_count);

  if (daemon) {
    Atomic::dec(&_atomic_daemon_threads_count);
  }
}

void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
  assert(Threads_lock->owned_by_self(), "must have threads lock");

  // Do not count hidden threads
  if (is_hidden_thread(thread)) {
    return;
  }

  assert(!thread->is_terminated(), "must not be terminated");
  if (!thread->is_exiting()) {
    // JavaThread::exit() skipped calling current_thread_exiting()
    decrement_thread_counts(thread, daemon);
  }

  int daemon_count = _atomic_daemon_threads_count;
  int count = _atomic_threads_count;

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_live_threads_count->get_value() > count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);

  _live_threads_count->dec(1);
  if (daemon) {
    assert(_daemon_threads_count->get_value() > daemon_count,
      "thread count mismatch %d : %d",
      (int)_daemon_threads_count->get_value(), daemon_count);

    _daemon_threads_count->dec(1);
  }

  // Counts are incremented at the same time, but atomic counts are
  // decremented earlier than perf counts.
  assert(_daemon_threads_count->get_value() >= daemon_count,
    "thread count mismatch %d : %d",
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_live_threads_count->get_value() >= count,
    "thread count mismatch %d : %d",
    (int)_live_threads_count->get_value(), count);
  assert(_live_threads_count->get_value() > 0 ||
    (_live_threads_count->get_value() == 0 && count == 0 &&
    _daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, live %d,%d daemon %d,%d",
    (int)_live_threads_count->get_value(), count,
    (int)_daemon_threads_count->get_value(), daemon_count);
  assert(_daemon_threads_count->get_value() > 0 ||
    (_daemon_threads_count->get_value() == 0 && daemon_count == 0),
    "thread counts should reach 0 at the same time, daemon %d,%d",
    (int)_daemon_threads_count->get_value(), daemon_count);
}

void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
  // Do not count hidden threads
  if (is_hidden_thread(jt)) {
    return;
  }

  assert(jt == JavaThread::current(), "Called by current thread");
  assert(!jt->is_terminated() && jt->is_exiting(), "must be exiting");

  decrement_thread_counts(jt, daemon);
}

// FIXME: JVMTI should call this function
Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
  assert(thread != NULL, "should be non-NULL");
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  // This function can be called on a target JavaThread that is not
  // the caller and we are not at a safepoint. So it is possible for
  // the waiting or pending condition to be over/stale and for the
  // first stage of async deflation to clear the object field in
  // the ObjectMonitor. It is also possible for the object to be
  // inflated again and to be associated with a completely different
  // ObjectMonitor by the time this object reference is processed
  // by the caller.
  ObjectMonitor *wait_obj = thread->current_waiting_monitor();

  oop obj = NULL;
  if (wait_obj != NULL) {
    // thread is doing an Object.wait() call
    obj = (oop) wait_obj->object();
    assert(AsyncDeflateIdleMonitors || obj != NULL, "Object.wait() should have an object");
  } else {
    ObjectMonitor *enter_obj = thread->current_pending_monitor();
    if (enter_obj != NULL) {
      // thread is trying to enter() an ObjectMonitor.
      obj = (oop) enter_obj->object();
      assert(AsyncDeflateIdleMonitors || obj != NULL, "ObjectMonitor should have an associated object!");
    }
  }

  Handle h(Thread::current(), obj);
  return h;
}

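// The three setters below toggle the corresponding monitoring feature under
// the Management_lock and return the previous setting.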
bool ThreadService::set_thread_monitoring_contention(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_monitoring_contention_enabled;
  _thread_monitoring_contention_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_cpu_time_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_cpu_time_enabled;
  _thread_cpu_time_enabled = flag;

  return prev;
}

bool ThreadService::set_thread_allocated_memory_enabled(bool flag) {
  MutexLocker m(Management_lock);

  bool prev = _thread_allocated_memory_enabled;
  _thread_allocated_memory_enabled = flag;

  return prev;
}

// GC support
void ThreadService::oops_do(OopClosure* f) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->oops_do(f);
  }
}

void ThreadService::metadata_do(void f(Metadata*)) {
  for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) {
    dump->metadata_do(f);
  }
}

void ThreadService::add_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);
  if (_threaddump_list == NULL) {
    _threaddump_list = dump;
  } else {
    dump->set_next(_threaddump_list);
    _threaddump_list = dump;
  }
}

void ThreadService::remove_thread_dump(ThreadDumpResult* dump) {
  MutexLocker ml(Management_lock);

  ThreadDumpResult* prev = NULL;
  bool found = false;
  for (ThreadDumpResult* d = _threaddump_list; d != NULL; prev = d, d = d->next()) {
    if (d == dump) {
      if (prev == NULL) {
        _threaddump_list = dump->next();
      } else {
        prev->set_next(dump->next());
      }
      found = true;
      break;
    }
  }
  assert(found, "The threaddump result to be removed must exist.");
}

// Dump the stack traces of the threads specified in the given threads array.
// Returns a StackTraceElement[][]; each element is the stack trace of the
// thread in the corresponding entry of the given threads array.
Handle ThreadService::dump_stack_traces(GrowableArray<instanceHandle>* threads,
                                        int num_threads,
                                        TRAPS) {
  assert(num_threads > 0, "just checking");

  ThreadDumpResult dump_result;
  VM_ThreadDump op(&dump_result,
                   threads,
                   num_threads,
                   -1,    /* entire stack */
                   false, /* with locked monitors */
                   false  /* with locked synchronizers */);
  VMThread::execute(&op);

  // Allocate the resulting StackTraceElement[][] object

  ResourceMark rm(THREAD);
  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_StackTraceElement_array(), true, CHECK_NH);
  ObjArrayKlass* ik = ObjArrayKlass::cast(k);
  objArrayOop r = oopFactory::new_objArray(ik, num_threads, CHECK_NH);
  objArrayHandle result_obj(THREAD, r);

  int num_snapshots = dump_result.num_snapshots();
  assert(num_snapshots == num_threads, "Must have num_threads thread snapshots");
  assert(num_snapshots == 0 || dump_result.t_list_has_been_set(), "ThreadsList must have been set if we have a snapshot");
  int i = 0;
  for (ThreadSnapshot* ts = dump_result.snapshots(); ts != NULL; i++, ts = ts->next()) {
    ThreadStackTrace* stacktrace = ts->get_stack_trace();
    if (stacktrace == NULL) {
      // No stack trace
      result_obj->obj_at_put(i, NULL);
    } else {
      // Construct an array of java/lang/StackTraceElement objects
      Handle backtrace_h = stacktrace->allocate_fill_stack_trace_element_array(CHECK_NH);
      result_obj->obj_at_put(i, backtrace_h());
    }
  }

  return result_obj;
}

void ThreadService::reset_contention_count_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_count_stat();
  }
}

void ThreadService::reset_contention_time_stat(JavaThread* thread) {
  ThreadStatistics* stat = thread->get_thread_stat();
  if (stat != NULL) {
    stat->reset_time_stat();
  }
}

// Find deadlocks involving raw monitors, object monitors and concurrent locks
// if concurrent_locks is true.
DeadlockCycle* ThreadService::find_deadlocks_at_safepoint(ThreadsList * t_list, bool concurrent_locks) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // This code was modified from the original Threads::find_deadlocks code.
  int globalDfn = 0, thisDfn;
  ObjectMonitor* waitingToLockMonitor = NULL;
  JvmtiRawMonitor* waitingToLockRawMonitor = NULL;
  oop waitingToLockBlocker = NULL;
  bool blocked_on_monitor = false;
  JavaThread *currentThread, *previousThread;
  int num_deadlocks = 0;

  // Initialize the depth-first-number for each JavaThread.
  JavaThreadIterator jti(t_list);
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    jt->set_depth_first_number(-1);
  }

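  // Walk each thread's wait-for chain (monitor -> owner -> monitor ...).
  // Reaching a thread whose depth-first number is >= thisDfn (other than a
  // trivial self-loop) means a new cycle was found; it is recorded as a
  // DeadlockCycle.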
  DeadlockCycle* deadlocks = NULL;
  DeadlockCycle* last = NULL;
  DeadlockCycle* cycle = new DeadlockCycle();
  for (JavaThread* jt = jti.first(); jt != NULL; jt = jti.next()) {
    if (jt->depth_first_number() >= 0) {
      // this thread was already visited
      continue;
    }

    thisDfn = globalDfn;
    jt->set_depth_first_number(globalDfn++);
    previousThread = jt;
    currentThread = jt;

    cycle->reset();

    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    // When there is a deadlock, all the monitors involved in the dependency
    // cycle must be contended and heavyweight. So we only care about the
    // heavyweight monitor a thread is waiting to lock.
    waitingToLockMonitor = jt->current_pending_monitor();
    // JVM TI raw monitors can also be involved in deadlocks, and we can be
    // waiting to lock both a raw monitor and ObjectMonitor at the same time.
    // It isn't clear how to make deadlock detection work correctly if that
    // happens.
    waitingToLockRawMonitor = jt->current_pending_raw_monitor();

    if (concurrent_locks) {
      waitingToLockBlocker = jt->current_park_blocker();
    }

    while (waitingToLockMonitor != NULL ||
           waitingToLockRawMonitor != NULL ||
           waitingToLockBlocker != NULL) {
      cycle->add_thread(currentThread);
      // Give preference to the raw monitor
      if (waitingToLockRawMonitor != NULL) {
        Thread* owner = waitingToLockRawMonitor->owner();
        if (owner != NULL && // the raw monitor could be released at any time
            owner->is_Java_thread()) {
          // only JavaThreads can be reported here
          currentThread = (JavaThread*) owner;
        }
      } else if (waitingToLockMonitor != NULL) {
        address currentOwner = (address)waitingToLockMonitor->owner();
        if (currentOwner != NULL) {
          currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                    currentOwner);
          if (currentThread == NULL) {
            // This function is called at a safepoint so the JavaThread
            // that owns waitingToLockMonitor should be findable, but
            // if it is not findable, then the previous currentThread is
            // blocked permanently. We record this as a deadlock.
            num_deadlocks++;

            cycle->set_deadlock(true);

            // add this cycle to the deadlocks list
            if (deadlocks == NULL) {
              deadlocks = cycle;
            } else {
              last->set_next(cycle);
            }
            last = cycle;
            cycle = new DeadlockCycle();
            break;
          }
        }
      } else {
        if (concurrent_locks) {
          if (waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
            oop threadObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
            // This JavaThread (if there is one) is protected by the
            // ThreadsListSetter in VM_FindDeadlocks::doit().
            currentThread = threadObj != NULL ? java_lang_Thread::thread(threadObj) : NULL;
          } else {
            currentThread = NULL;
          }
        }
      }

      if (currentThread == NULL) {
        // No dependency on another thread
        break;
      }
      if (currentThread->depth_first_number() < 0) {
        // First visit to this thread
        currentThread->set_depth_first_number(globalDfn++);
      } else if (currentThread->depth_first_number() < thisDfn) {
        // Thread already visited, and not on a (new) cycle
        break;
      } else if (currentThread == previousThread) {
        // Self-loop, ignore
        break;
      } else {
        // We have a (new) cycle
        num_deadlocks++;

        cycle->set_deadlock(true);

        // add this cycle to the deadlocks list
        if (deadlocks == NULL) {
          deadlocks = cycle;
        } else {
          last->set_next(cycle);
        }
        last = cycle;
        cycle = new DeadlockCycle();
        break;
      }
      previousThread = currentThread;
      waitingToLockMonitor = (ObjectMonitor*)currentThread->current_pending_monitor();
      if (concurrent_locks) {
        waitingToLockBlocker = currentThread->current_park_blocker();
      }
    }

  }
  delete cycle;
  return deadlocks;
}

ThreadDumpResult::ThreadDumpResult() : _num_threads(0), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {

  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, Method*
  // in the stack trace will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::ThreadDumpResult(int num_threads) : _num_threads(num_threads), _num_snapshots(0), _snapshots(NULL), _last(NULL), _next(NULL), _setter() {
  // Create a new ThreadDumpResult object and append to the list.
  // If GC happens before this function returns, oops
  // will be visited.
  ThreadService::add_thread_dump(this);
}

ThreadDumpResult::~ThreadDumpResult() {
  ThreadService::remove_thread_dump(this);

  // free all the ThreadSnapshot objects created during
  // the VM_ThreadDump operation
  ThreadSnapshot* ts = _snapshots;
  while (ts != NULL) {
    ThreadSnapshot* p = ts;
    ts = ts->next();
    delete p;
  }
}

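// Allocate and link an empty ThreadSnapshot; the caller is responsible for
// initializing it afterwards.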
ThreadSnapshot* ThreadDumpResult::add_thread_snapshot() {
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  return ts;
}

ThreadSnapshot* ThreadDumpResult::add_thread_snapshot(JavaThread* thread) {
  // Note: it is very important that the ThreadSnapshot* gets linked before
  // ThreadSnapshot::initialize gets called. This is to ensure that
  // ThreadSnapshot::oops_do can get called prior to the field
  // ThreadSnapshot::_threadObj being assigned a value (to prevent a dangling
  // oop).
  ThreadSnapshot* ts = new ThreadSnapshot();
  link_thread_snapshot(ts);
  ts->initialize(t_list(), thread);
  return ts;
}

void ThreadDumpResult::link_thread_snapshot(ThreadSnapshot* ts) {
  assert(_num_threads == 0 || _num_snapshots < _num_threads,
         "_num_snapshots must be less than _num_threads");
  _num_snapshots++;
  if (_snapshots == NULL) {
    _snapshots = ts;
  } else {
    _last->set_next(ts);
  }
  _last = ts;
}

void ThreadDumpResult::oops_do(OopClosure* f) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->oops_do(f);
  }
}

void ThreadDumpResult::metadata_do(void f(Metadata*)) {
  for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) {
    ts->metadata_do(f);
  }
}

ThreadsList* ThreadDumpResult::t_list() {
  return _setter.list();
}

StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) {
  _method = jvf->method();
  _bci = jvf->bci();
  _class_holder = _method->method_holder()->klass_holder();
  _locked_monitors = NULL;
  if (with_lock_info) {
    ResourceMark rm;
    HandleMark hm;
    GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
    int length = list->length();
    if (length > 0) {
      _locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true);
      for (int i = 0; i < length; i++) {
        MonitorInfo* monitor = list->at(i);
        assert(monitor->owner() != NULL, "This monitor must have an owning object");
        _locked_monitors->append(monitor->owner());
      }
    }
  }
}

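// Keep the locked monitor objects and the holder class of the method alive
// for GC.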
void StackFrameInfo::oops_do(OopClosure* f) {
  if (_locked_monitors != NULL) {
    int length = _locked_monitors->length();
    for (int i = 0; i < length; i++) {
      f->do_oop((oop*) _locked_monitors->adr_at(i));
    }
  }
  f->do_oop(&_class_holder);
}

void StackFrameInfo::metadata_do(void f(Metadata*)) {
  f(_method);
}

void StackFrameInfo::print_on(outputStream* st) const {
  ResourceMark rm;
  java_lang_Throwable::print_stack_element(st, method(), bci());
  int len = (_locked_monitors != NULL ? _locked_monitors->length() : 0);
  for (int i = 0; i < len; i++) {
    oop o = _locked_monitors->at(i);
    st->print_cr("\t- locked <" INTPTR_FORMAT "> (a %s)", p2i(o), o->klass()->external_name());
  }

}

// Iterate through monitor cache to find JNI locked monitors
class InflatedMonitorsClosure: public MonitorClosure {
private:
  ThreadStackTrace* _stack_trace;
  Thread* _thread;
public:
  InflatedMonitorsClosure(Thread* t, ThreadStackTrace* st) {
    _thread = t;
    _stack_trace = st;
  }
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == _thread) {
      oop object = (oop) mid->object();
      if (!_stack_trace->is_owned_monitor_on_stack(object)) {
        _stack_trace->add_jni_locked_monitor(object);
      }
    }
  }
};

ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
  _thread = t;
  _frames = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
  _depth = 0;
  _with_locked_monitors = with_locked_monitors;
  if (_with_locked_monitors) {
    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true);
  } else {
    _jni_locked_monitors = NULL;
  }
}

ThreadStackTrace::~ThreadStackTrace() {
  for (int i = 0; i < _frames->length(); i++) {
    delete _frames->at(i);
  }
  delete _frames;
  if (_jni_locked_monitors != NULL) {
    delete _jni_locked_monitors;
  }
}

void ThreadStackTrace::dump_stack_at_safepoint(int maxDepth) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  if (_thread->has_last_Java_frame()) {
    RegisterMap reg_map(_thread);
    vframe* start_vf = _thread->last_java_vframe(&reg_map);
    int count = 0;
    for (vframe* f = start_vf; f; f = f->sender() ) {
      if (maxDepth >= 0 && count == maxDepth) {
        // Skip frames if more than maxDepth
        break;
      }
      if (f->is_java_frame()) {
        javaVFrame* jvf = javaVFrame::cast(f);
        add_stack_frame(jvf);
        count++;
      } else {
        // Ignore non-Java frames
      }
    }
  }

  if (_with_locked_monitors) {
    // Iterate inflated monitors and find monitors locked by this thread
    // not found in the stack
    InflatedMonitorsClosure imc(_thread, this);
    ObjectSynchronizer::monitors_iterate(&imc);
  }
}


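// Return true if the given object is one of the monitors recorded as locked
// by a frame in this stack trace.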
bool ThreadStackTrace::is_owned_monitor_on_stack(oop object) {
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  bool found = false;
  int num_frames = get_stack_depth();
  for (int depth = 0; depth < num_frames; depth++) {
    StackFrameInfo* frame = stack_frame_at(depth);
    int len = frame->num_locked_monitors();
    GrowableArray<oop>* locked_monitors = frame->locked_monitors();
    for (int j = 0; j < len; j++) {
      oop monitor = locked_monitors->at(j);
      assert(monitor != NULL, "must be a Java object");
      if (monitor == object) {
        found = true;
        break;
      }
    }
  }
  return found;
}

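// Allocate a java.lang.StackTraceElement[] and fill it with one element per
// captured frame.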
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
  InstanceKlass* ik = SystemDictionary::StackTraceElement_klass();
  assert(ik != NULL, "must be loaded in 1.4+");

  // Allocate an array of java/lang/StackTraceElement objects
  objArrayOop ste = oopFactory::new_objArray(ik, _depth, CHECK_NH);
  objArrayHandle backtrace(THREAD, ste);
  for (int j = 0; j < _depth; j++) {
    StackFrameInfo* frame = _frames->at(j);
    methodHandle mh(THREAD, frame->method());
    oop element = java_lang_StackTraceElement::create(mh, frame->bci(), CHECK_NH);
    backtrace->obj_at_put(j, element);
  }
  return backtrace;
}

void ThreadStackTrace::add_stack_frame(javaVFrame* jvf) {
  StackFrameInfo* frame = new StackFrameInfo(jvf, _with_locked_monitors);
  _frames->append(frame);
  _depth++;
}

void ThreadStackTrace::oops_do(OopClosure* f) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->oops_do(f);
  }

  length = (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0);
  for (int j = 0; j < length; j++) {
    f->do_oop((oop*) _jni_locked_monitors->adr_at(j));
  }
}

void ThreadStackTrace::metadata_do(void f(Metadata*)) {
  int length = _frames->length();
  for (int i = 0; i < length; i++) {
    _frames->at(i)->metadata_do(f);
  }
}


ConcurrentLocksDump::~ConcurrentLocksDump() {
  if (_retain_map_on_free) {
    return;
  }

  for (ThreadConcurrentLocks* t = _map; t != NULL;)  {
    ThreadConcurrentLocks* tcl = t;
    t = t->next();
    delete tcl;
  }
}

void ConcurrentLocksDump::dump_at_safepoint() {
  // dump all locked concurrent locks
  assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");

  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true /* C_heap */);

  // Find all instances of AbstractOwnableSynchronizer
  HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
                                              aos_objects);
  // Build a map of thread to its owned AQS locks
  build_map(aos_objects);

  delete aos_objects;
}


// Build a map from each JavaThread to all the AbstractOwnableSynchronizers it owns.
void ConcurrentLocksDump::build_map(GrowableArray<oop>* aos_objects) {
  int length = aos_objects->length();
  for (int i = 0; i < length; i++) {
    oop o = aos_objects->at(i);
    oop owner_thread_obj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(o);
    if (owner_thread_obj != NULL) {
      // See the comments in ThreadConcurrentLocks for how this
      // JavaThread* is protected.
      JavaThread* thread = java_lang_Thread::thread(owner_thread_obj);
      assert(o->is_instance(), "Must be an instanceOop");
      add_lock(thread, (instanceOop) o);
    }
  }
}

void ConcurrentLocksDump::add_lock(JavaThread* thread, instanceOop o) {
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(thread);
  if (tcl != NULL) {
    tcl->add_lock(o);
    return;
  }

  // First owned lock found for this thread
  tcl = new ThreadConcurrentLocks(thread);
  tcl->add_lock(o);
  if (_map == NULL) {
    _map = tcl;
  } else {
    _last->set_next(tcl);
  }
  _last = tcl;
}

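// Linear search of the per-thread list; returns NULL if no owned locks have
// been recorded for the given thread.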
ThreadConcurrentLocks* ConcurrentLocksDump::thread_concurrent_locks(JavaThread* thread) {
  for (ThreadConcurrentLocks* tcl = _map; tcl != NULL; tcl = tcl->next()) {
    if (tcl->java_thread() == thread) {
      return tcl;
    }
  }
  return NULL;
}

void ConcurrentLocksDump::print_locks_on(JavaThread* t, outputStream* st) {
  st->print_cr("   Locked ownable synchronizers:");
  ThreadConcurrentLocks* tcl = thread_concurrent_locks(t);
  GrowableArray<instanceOop>* locks = (tcl != NULL ? tcl->owned_locks() : NULL);
  if (locks == NULL || locks->is_empty()) {
    st->print_cr("\t- None");
    st->cr();
    return;
  }

  for (int i = 0; i < locks->length(); i++) {
    instanceOop obj = locks->at(i);
    st->print_cr("\t- <" INTPTR_FORMAT "> (a %s)", p2i(obj), obj->klass()->external_name());
  }
  st->cr();
}

ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
  _thread = thread;
  _owned_locks = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
  _next = NULL;
}

ThreadConcurrentLocks::~ThreadConcurrentLocks() {
  delete _owned_locks;
}

void ThreadConcurrentLocks::add_lock(instanceOop o) {
  _owned_locks->append(o);
}

void ThreadConcurrentLocks::oops_do(OopClosure* f) {
  int length = _owned_locks->length();
  for (int i = 0; i < length; i++) {
    f->do_oop((oop*) _owned_locks->adr_at(i));
  }
}

ThreadStatistics::ThreadStatistics() {
  _contended_enter_count = 0;
  _monitor_wait_count = 0;
  _sleep_count = 0;
  _count_pending_reset = false;
  _timer_pending_reset = false;
  memset((void*) _perf_recursion_counts, 0, sizeof(_perf_recursion_counts));
}

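// Record the thread's status, contention/wait/sleep statistics, and, if the
// thread is blocked, waiting or parked, the blocker object and its owner.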
void ThreadSnapshot::initialize(ThreadsList * t_list, JavaThread* thread) {
  _thread = thread;
  _threadObj = thread->threadObj();

  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();

  // If thread is still attaching then threadObj will be NULL.
  _thread_status = _threadObj == NULL ? java_lang_Thread::NEW
                                     : java_lang_Thread::get_thread_status(_threadObj);

  _is_ext_suspended = thread->is_being_ext_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);

  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {

    Handle obj = ThreadService::get_current_contended_monitor(thread);
    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = java_lang_Thread::RUNNABLE;
    } else {
      _blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(t_list, obj);
      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // Ownership information for the monitor is not available (it may no
        // longer be owned, or it may be in the process of being released to
        // another thread), so report this thread as RUNNABLE.
        // Also, when the owner thread is still attaching, its java.lang.Thread
        // object is not completely initialized (for example, the thread name
        // and id may not be set yet), so hide the attaching thread.
        _thread_status = java_lang_Thread::RUNNABLE;
        _blocker_object = NULL;
      } else if (owner != NULL) {
        _blocker_object_owner = owner->threadObj();
      }
    }
  }

  // Support for JSR-166 locks
  if (_thread_status == java_lang_Thread::PARKED || _thread_status == java_lang_Thread::PARKED_TIMED) {
    _blocker_object = thread->current_park_blocker();
    if (_blocker_object != NULL && _blocker_object->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass())) {
      _blocker_object_owner = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
    }
  }
}

ThreadSnapshot::~ThreadSnapshot() {
  delete _stack_trace;
  delete _concurrent_locks;
}

void ThreadSnapshot::dump_stack_at_safepoint(int max_depth, bool with_locked_monitors) {
  _stack_trace = new ThreadStackTrace(_thread, with_locked_monitors);
  _stack_trace->dump_stack_at_safepoint(max_depth);
}


void ThreadSnapshot::oops_do(OopClosure* f) {
  f->do_oop(&_threadObj);
  f->do_oop(&_blocker_object);
  f->do_oop(&_blocker_object_owner);
  if (_stack_trace != NULL) {
    _stack_trace->oops_do(f);
  }
  if (_concurrent_locks != NULL) {
    _concurrent_locks->oops_do(f);
  }
}

void ThreadSnapshot::metadata_do(void f(Metadata*)) {
  if (_stack_trace != NULL) {
    _stack_trace->metadata_do(f);
  }
}


DeadlockCycle::DeadlockCycle() {
  _is_deadlock = false;
  _threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
  _next = NULL;
}

DeadlockCycle::~DeadlockCycle() {
  delete _threads;
}

void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const {
  st->cr();
  st->print_cr("Found one Java-level deadlock:");
  st->print("=============================");

  JavaThread* currentThread;
  JvmtiRawMonitor* waitingToLockRawMonitor;
  oop waitingToLockBlocker;
  int len = _threads->length();
  for (int i = 0; i < len; i++) {
    currentThread = _threads->at(i);
    // The ObjectMonitor* can't be async deflated since we are at a safepoint.
    ObjectMonitor* waitingToLockMonitor = currentThread->current_pending_monitor();
    waitingToLockRawMonitor = currentThread->current_pending_raw_monitor();
    waitingToLockBlocker = currentThread->current_park_blocker();
    st->cr();
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    const char* owner_desc = ",\n  which is held by";

    // Note: As the JVM TI "monitor contended enter" event callback is executed after ObjectMonitor
    // sets the current pending monitor, it is possible to then see a pending raw monitor as well.
    if (waitingToLockRawMonitor != NULL) {
      st->print("  waiting to lock JVM TI raw monitor " INTPTR_FORMAT, p2i(waitingToLockRawMonitor));
      Thread* owner = waitingToLockRawMonitor->owner();
      // Could be NULL as the raw monitor could be released at any time if held by non-JavaThread
      if (owner != NULL) {
        if (owner->is_Java_thread()) {
          currentThread = (JavaThread*) owner;
          st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
        } else {
          st->print_cr(",\n  which has now been released");
        }
      } else {
        st->print_cr("%s non-Java thread=" PTR_FORMAT, owner_desc, p2i(owner));
      }
    }

    if (waitingToLockMonitor != NULL) {
      st->print("  waiting to lock monitor " INTPTR_FORMAT, p2i(waitingToLockMonitor));
      oop obj = (oop)waitingToLockMonitor->object();
      st->print(" (object " INTPTR_FORMAT ", a %s)", p2i(obj),
                 obj->klass()->external_name());

      if (!currentThread->current_pending_monitor_is_from_java()) {
        owner_desc = "\n  in JNI, which is held by";
      }
      currentThread = Threads::owning_thread_from_monitor_owner(t_list,
                                                                (address)waitingToLockMonitor->owner());
      if (currentThread == NULL) {
        // The deadlock was detected at a safepoint so the JavaThread
        // that owns waitingToLockMonitor should be findable, but
        // if it is not findable, then the previous currentThread is
        // blocked permanently.
        st->print_cr("%s UNKNOWN_owner_addr=" PTR_FORMAT, owner_desc,
                  p2i(waitingToLockMonitor->owner()));
        continue;
      }
    } else {
      st->print("  waiting for ownable synchronizer " INTPTR_FORMAT ", (a %s)",
                p2i(waitingToLockBlocker),
                waitingToLockBlocker->klass()->external_name());
      assert(waitingToLockBlocker->is_a(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass()),
             "Must be an AbstractOwnableSynchronizer");
      oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
      currentThread = java_lang_Thread::thread(ownerObj);
      assert(currentThread != NULL, "AbstractOwnableSynchronizer owning thread is unexpectedly NULL");
    }
    st->print_cr("%s \"%s\"", owner_desc, currentThread->get_thread_name());
  }

  st->cr();

  // Print stack traces
  bool oldJavaMonitorsInStackTrace = JavaMonitorsInStackTrace;
  JavaMonitorsInStackTrace = true;
  st->print_cr("Java stack information for the threads listed above:");
  st->print_cr("===================================================");
  for (int j = 0; j < len; j++) {
    currentThread = _threads->at(j);
    st->print_cr("\"%s\":", currentThread->get_thread_name());
    currentThread->print_stack_on(st);
  }
  JavaMonitorsInStackTrace = oldJavaMonitorsInStackTrace;
}

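// Collect handles to the java.lang.Thread objects of all live, externally
// visible JavaThreads, optionally including JVMTI agent threads and threads
// that are still attaching via JNI.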
ThreadsListEnumerator::ThreadsListEnumerator(Thread* cur_thread,
                                             bool include_jvmti_agent_threads,
                                             bool include_jni_attaching_threads) {
  assert(cur_thread == Thread::current(), "Check current thread");

  int init_size = ThreadService::get_live_thread_count();
  _threads_array = new GrowableArray<instanceHandle>(init_size);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Skip JavaThreads in the process of exiting and VM internal JavaThreads.
    // Threads in _thread_new or _thread_new_trans state are included,
    // i.e. threads that have been started but are not yet running.
    if (jt->threadObj() == NULL   ||
        jt->is_exiting() ||
        !java_lang_Thread::is_alive(jt->threadObj())   ||
        jt->is_hidden_from_external_view()) {
      continue;
    }

    // skip agent threads
    if (!include_jvmti_agent_threads && jt->is_jvmti_agent_thread()) {
      continue;
    }

    // skip jni threads in the process of attaching
    if (!include_jni_attaching_threads && jt->is_attaching_via_jni()) {
      continue;
    }

    instanceHandle h(cur_thread, (instanceOop) jt->threadObj());
    _threads_array->append(h);
  }
}