// Copyright (c) 2011-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#include <sync.h>

#include <logging.h>
#include <tinyformat.h>
#include <util/strencodings.h>
#include <util/threadnames.h>

#include <map>
#include <set>
#include <system_error>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

#ifdef DEBUG_LOCKCONTENTION
#if !defined(HAVE_THREAD_LOCAL)
static_assert(false, "thread_local is not supported");
#endif
void PrintLockContention(const char* pszName, const char* pszFile, int nLine)
{
    LogPrintf("LOCKCONTENTION: %s\n", pszName);
    LogPrintf("Locker: %s:%d\n", pszFile, nLine);
}
#endif /* DEBUG_LOCKCONTENTION */

#ifdef DEBUG_LOCKORDER
//
// Early deadlock detection.
// Problem being solved:
//    Thread 1 locks A, then B, then C
//    Thread 2 locks D, then C, then A
//     --> may result in deadlock between the two threads, depending on when they run.
// Solution implemented here:
// Keep track of pairs of locks: (A before B), (A before C), etc.
// Complain if any thread tries to lock in a different order.
//
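// For illustration only (hypothetical mutexes mutex_a and mutex_b), the kind of
// order inversion reported below would come from call sites such as:
//
//     // Thread 1            // Thread 2
//     LOCK(mutex_a);         LOCK(mutex_b);
//     LOCK(mutex_b);         LOCK(mutex_a);  // acquisition order differs -> reported
//
// The inconsistent order is flagged as soon as it is attempted, even if the two
// threads never actually deadlock at runtime.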

struct CLockLocation {
    CLockLocation(
        const char* pszName,
        const char* pszFile,
        int nLine,
        bool fTryIn,
        const std::string& thread_name)
        : fTry(fTryIn),
          mutexName(pszName),
          sourceFile(pszFile),
          m_thread_name(thread_name),
          sourceLine(nLine) {}

    std::string ToString() const
    {
        return strprintf(
            "'%s' in %s:%s%s (in thread '%s')",
            mutexName, sourceFile, sourceLine, (fTry ? " (TRY)" : ""), m_thread_name);
    }

    std::string Name() const
    {
        return mutexName;
    }

private:
    bool fTry;
    std::string mutexName;
    std::string sourceFile;
    const std::string& m_thread_name;
    int sourceLine;
};

using LockStackItem = std::pair<void*, CLockLocation>;
using LockStack = std::vector<LockStackItem>;
using LockStacks = std::unordered_map<std::thread::id, LockStack>;

using LockPair = std::pair<void*, void*>;
using LockOrders = std::map<LockPair, LockStack>;
using InvLockOrders = std::set<LockPair>;

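// Lock-order tracking state, all guarded by dd_mutex:
//  - m_lock_stacks: for each thread, the locks it currently holds, in acquisition order
//  - lockorders: every pair (A, B) observed as "A acquired before B", mapped to the
//    lock stack that first established that order
//  - invlockorders: the reversed pairs, kept so DeleteLock() can erase both directions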
struct LockData {
    LockStacks m_lock_stacks;
    LockOrders lockorders;
    InvLockOrders invlockorders;
    std::mutex dd_mutex;
};

LockData& GetLockData() {
    // This approach guarantees that the object is not destroyed until after its last use.
    // The operating system automatically reclaims all the memory in a program's heap when that program exits.
    // Since the ~LockData() destructor is never called, the LockData class and all
    // its subclasses must have implicitly-defined destructors.
    static LockData& lock_data = *new LockData();
    return lock_data;
}

static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2)
{
    LogPrintf("POTENTIAL DEADLOCK DETECTED\n");
    LogPrintf("Previous lock order was:\n");
    for (const LockStackItem& i : s1) {
        if (i.first == mismatch.first) {
            LogPrintf(" (1)"); /* Continued */
        }
        if (i.first == mismatch.second) {
            LogPrintf(" (2)"); /* Continued */
        }
        LogPrintf(" %s\n", i.second.ToString());
    }

    std::string mutex_a, mutex_b;
    LogPrintf("Current lock order is:\n");
    for (const LockStackItem& i : s2) {
        if (i.first == mismatch.first) {
            LogPrintf(" (1)"); /* Continued */
            mutex_a = i.second.Name();
        }
        if (i.first == mismatch.second) {
            LogPrintf(" (2)"); /* Continued */
            mutex_b = i.second.Name();
        }
        LogPrintf(" %s\n", i.second.ToString());
    }
    if (g_debug_lockorder_abort) {
        tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString());
        abort();
    }
    throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b));
}

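// Record that the current thread is acquiring lock c. For every lock already held by
// this thread, register the ordering pair (held, c); if the reverse pair (c, held) has
// been seen before, report a potential deadlock via potential_deadlock_detected().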
static void push_lock(void* c, const CLockLocation& locklocation)
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

    LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    lock_stack.emplace_back(c, locklocation);
    for (const LockStackItem& i : lock_stack) {
        if (i.first == c)
            break;

        const LockPair p1 = std::make_pair(i.first, c);
        if (lockdata.lockorders.count(p1))
            continue;

        const LockPair p2 = std::make_pair(c, i.first);
        if (lockdata.lockorders.count(p2)) {
            auto lock_stack_copy = lock_stack;
            lock_stack.pop_back();
            potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
            // potential_deadlock_detected() does not return.
        }

        lockdata.lockorders.emplace(p1, lock_stack);
        lockdata.invlockorders.insert(p2);
    }
}

static void pop_lock()
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

    LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    lock_stack.pop_back();
    if (lock_stack.empty()) {
        lockdata.m_lock_stacks.erase(std::this_thread::get_id());
    }
}

void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry)
{
    push_lock(cs, CLockLocation(pszName, pszFile, nLine, fTry, util::ThreadGetInternalName()));
}

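// Check that cs is the lock most recently acquired by this thread and report its
// registered name through lockname; otherwise throw a std::system_error naming
// guardname and the call site.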
void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
{
    {
        LockData& lockdata = GetLockData();
        std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

        const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
        if (!lock_stack.empty()) {
            const auto& lastlock = lock_stack.back();
            if (lastlock.first == cs) {
                lockname = lastlock.second.Name();
                return;
            }
        }
    }
    throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
}

void LeaveCritical()
{
    pop_lock();
}

std::string LocksHeld()
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

    const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    std::string result;
    for (const LockStackItem& i : lock_stack)
        result += i.second.ToString() + std::string("\n");
    return result;
}

static bool LockHeld(void* mutex)
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);

    const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
    for (const LockStackItem& i : lock_stack) {
        if (i.first == mutex) return true;
    }

    return false;
}

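// Abort with a diagnostic on stderr when the held/not-held expectation is violated.
// These are normally reached through the AssertLockHeld()/AssertLockNotHeld() macros
// in sync.h, e.g. AssertLockHeld(cs_main) at the top of a function that requires it.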
template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs)
{
    if (LockHeld(cs)) return;
    tfm::format(std::cerr, "Assertion failed: lock %s not held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
    abort();
}
template void AssertLockHeldInternal(const char*, const char*, int, Mutex*);
template void AssertLockHeldInternal(const char*, const char*, int, RecursiveMutex*);

template <typename MutexType>
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs)
{
    if (!LockHeld(cs)) return;
    tfm::format(std::cerr, "Assertion failed: lock %s held in %s:%i; locks held:\n%s", pszName, pszFile, nLine, LocksHeld());
    abort();
}
template void AssertLockNotHeldInternal(const char*, const char*, int, Mutex*);
template void AssertLockNotHeldInternal(const char*, const char*, int, RecursiveMutex*);

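// Forget all ordering data involving a mutex that is being destroyed, so a later
// allocation at the same address cannot produce false positives. Both the forward
// map (lockorders) and the reversed set (invlockorders) are scanned.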
void DeleteLock(void* cs)
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
    const LockPair item = std::make_pair(cs, nullptr);
    LockOrders::iterator it = lockdata.lockorders.lower_bound(item);
    while (it != lockdata.lockorders.end() && it->first.first == cs) {
        const LockPair invitem = std::make_pair(it->first.second, it->first.first);
        lockdata.invlockorders.erase(invitem);
        lockdata.lockorders.erase(it++);
    }
    InvLockOrders::iterator invit = lockdata.invlockorders.lower_bound(item);
    while (invit != lockdata.invlockorders.end() && invit->first == cs) {
        const LockPair invinvitem = std::make_pair(invit->second, invit->first);
        lockdata.lockorders.erase(invinvitem);
        lockdata.invlockorders.erase(invit++);
    }
}

bool LockStackEmpty()
{
    LockData& lockdata = GetLockData();
    std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
    const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
    if (it == lockdata.m_lock_stacks.end()) {
        return true;
    }
    return it->second.empty();
}

bool g_debug_lockorder_abort = true;

#endif /* DEBUG_LOCKORDER */