//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

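// The version-1 detector keeps no per-physical-thread state; the struct exists
// only to satisfy the DDetector interface.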
struct DDPhysicalThread {
};

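// Per-logical-thread state: the owning thread context, the thread-local part
// of the deadlock detector, and a buffered report handed out via GetReport()
// when report_pending is set.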
struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

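// Version-1 detector: a single global DeadlockDetector over an NxN adjacency
// bit matrix, protected by one spin mutex. The hot paths in MutexBeforeLock
// and MutexAfterLock avoid taking the mutex when possible.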
struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

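// Constructs the detector in raw mmap'ed memory via placement new.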
DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

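// Records the creation stack; a graph node id is assigned lazily on first use
// in MutexEnsureID().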
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

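// Assigns m a node id valid for the current epoch (allocating a new node if
// needed) and brings lt's thread-local state up to that epoch. Called with
// mtx held.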
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

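// Checks whether acquiring m would close a cycle in the lock-order graph.
// The common cases (first lock held by this thread, or all edges for m already
// known) return without taking the global mutex.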
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

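// Reconstructs the lock cycle that ends at m and stores it in lt->rep; the
// report is returned to the caller later via GetReport().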
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //    unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

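// Marks m as held by lt and, unless this was a trylock (which cannot block and
// therefore cannot deadlock), adds lock-order edges from the mutexes already
// held to m.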
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock:   %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

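// Removes m from the set of locks currently held by the calling thread.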
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

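// Removes m's node from the graph (if it still belongs to the current epoch)
// so the node id can be reused for another mutex.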
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

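// Returns the report buffered by ReportDeadlock() for this thread, if any, and
// clears the pending flag so each deadlock is reported only once.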
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

} // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1