1 //===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
14 #include <sanitizer_common/sanitizer_stackdepot.h>
15
16 #include "tsan_rtl.h"
17 #include "tsan_flags.h"
18 #include "tsan_sync.h"
19 #include "tsan_report.h"
20 #include "tsan_symbolize.h"
21 #include "tsan_platform.h"
22
23 namespace __tsan {
24
25 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
26
// Glue object handed to the deadlock detector (DD): wires up the current
// thread's physical (per-Processor) and logical (per-thread) DD state and
// lets the DD request a stack id / unique thread id on demand.
struct Callback final : public DDCallback {
  ThreadState *thr;  // current thread
  uptr pc;           // pc of the mutex operation being processed

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  // Captures the current stack and returns its depot id.
  u32 Unwind() override { return CurrentStackId(thr, pc); }
  // Globally unique thread id (never reused, unlike tid).
  int UniqueTid() override { return thr->unique_id; }
};
41
// Registers mutex s with the deadlock detector and stores the mutex id in
// s->dd.ctx so DD reports can be mapped back to this SyncVar.
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
47
// Emits a mutex-misuse report of the given type (double lock, bad unlock,
// etc.) for the mutex with id mid located at addr, with the current stack
// attached.
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
                              uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  if (!ShouldReport(thr, typ))
    return;
  // The registry must stay locked while the report is being built/printed.
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
65
// Handles mutex construction (e.g. a pthread_mutex_init interceptor).
// Records the creation flags and, once, the creation stack on the SyncVar.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    // Imitate a write to the mutex memory so races between initialization
    // and other accesses are detected. Skipped for linker-initialized
    // mutexes, which have no well-defined init point.
    // NOTE(review): is_freeing appears to tag this synthetic access for
    // report classification — confirm against the race reporter.
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Remember the creation stack only the first time (not used in Go mode).
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
80
// Handles mutex destruction. Reports destruction of a still-locked mutex
// (if enabled), resets the SyncVar state, and imitates a write to catch
// unlock-destroy races.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;  // never synchronized on — nothing to clean up
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    // Re-initialize DD state right away — presumably so that a later mutex
    // reusing this address starts from a clean slate. TODO confirm.
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  // Destroying a locked mutex is reported at most once per mutex, gated by
  // MutexFlagBroken.
  bool unlock_locked = false;
  if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
      !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc()); // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    // Second stack: where the mutex was last locked, restored from the trace
    // via the (tid, epoch) packed in last_lock.
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    // Now that the report is out, reset the SyncVar. Re-lookup is required:
    // s->mtx was released above, so the var may have changed or vanished.
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
140
// Called before blocking on a write lock. Only feeds the deadlock detector
// (lock-order edge), and only for potentially-blocking (non-try) locks;
// a recursive re-acquisition by the current owner cannot deadlock and is
// skipped.
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      // Report only after dropping s->mtx: reporting machinery may itself
      // take sync mutexes.
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}
156
MutexPostLock(ThreadState * thr,uptr pc,uptr addr,u32 flagz,int rec)157 void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
158 int rec) NO_THREAD_SAFETY_ANALYSIS {
159 DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
160 thr->tid, addr, flagz, rec);
161 if (flagz & MutexFlagRecursiveLock)
162 CHECK_GT(rec, 0);
163 else
164 rec = 1;
165 if (IsAppMem(addr))
166 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
167 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
168 s->UpdateFlags(flagz);
169 thr->fast_state.IncrementEpoch();
170 TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
171 bool report_double_lock = false;
172 if (s->owner_tid == kInvalidTid) {
173 CHECK_EQ(s->recursion, 0);
174 s->owner_tid = thr->tid;
175 s->last_lock = thr->fast_state.raw();
176 } else if (s->owner_tid == thr->tid) {
177 CHECK_GT(s->recursion, 0);
178 } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
179 s->SetFlags(MutexFlagBroken);
180 report_double_lock = true;
181 }
182 const bool first = s->recursion == 0;
183 s->recursion += rec;
184 if (first) {
185 AcquireImpl(thr, pc, &s->clock);
186 AcquireImpl(thr, pc, &s->read_clock);
187 } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
188 }
189 thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
190 bool pre_lock = false;
191 if (first && common_flags()->detect_deadlocks) {
192 pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
193 !(flagz & MutexFlagTryLock);
194 Callback cb(thr, pc);
195 if (pre_lock)
196 ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
197 ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
198 }
199 u64 mid = s->GetId();
200 s->mtx.Unlock();
201 // Can't touch s after this point.
202 s = 0;
203 if (report_double_lock)
204 ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
205 if (first && pre_lock && common_flags()->detect_deadlocks) {
206 Callback cb(thr, pc);
207 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
208 }
209 }
210
MutexUnlock(ThreadState * thr,uptr pc,uptr addr,u32 flagz)211 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
212 DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
213 if (IsAppMem(addr))
214 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
215 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
216 thr->fast_state.IncrementEpoch();
217 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
218 int rec = 0;
219 bool report_bad_unlock = false;
220 if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
221 if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
222 s->SetFlags(MutexFlagBroken);
223 report_bad_unlock = true;
224 }
225 } else {
226 rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
227 s->recursion -= rec;
228 if (s->recursion == 0) {
229 s->owner_tid = kInvalidTid;
230 ReleaseStoreImpl(thr, pc, &s->clock);
231 } else {
232 }
233 }
234 thr->mset.Del(s->GetId(), true);
235 if (common_flags()->detect_deadlocks && s->recursion == 0 &&
236 !report_bad_unlock) {
237 Callback cb(thr, pc);
238 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
239 }
240 u64 mid = s->GetId();
241 s->mtx.Unlock();
242 // Can't touch s after this point.
243 if (report_bad_unlock)
244 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
245 if (common_flags()->detect_deadlocks && !report_bad_unlock) {
246 Callback cb(thr, pc);
247 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
248 }
249 return rec;
250 }
251
// Called before blocking on a read lock. Like MutexPreLock, this only feeds
// the deadlock detector, and only for potentially-blocking (non-try) locks.
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    // Report only after dropping s->mtx: reporting may itself take locks.
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
263
// Called after a read lock is acquired. Read lock state is not tracked via
// owner_tid/recursion (those belong to the writer); readers only acquire the
// write-release clock and record themselves in the mutex set.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != kInvalidTid) {
    // Read lock while some thread holds the write lock: report once.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  // Synchronize with the last write critical section.
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    // If the interceptor could not run MutexPreReadLock, do the before-lock
    // DD step now, but only for potentially-blocking (non-try) locks.
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
302
// Handles a read unlock: releases into the shared read_clock so the next
// write locker acquires all readers at once.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != kInvalidTid) {
    // Read unlock while some thread holds the write lock: report once.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
333
MutexReadOrWriteUnlock(ThreadState * thr,uptr pc,uptr addr)334 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
335 DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
336 if (IsAppMem(addr))
337 MemoryReadAtomic(thr, pc, addr, kSizeLog1);
338 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
339 bool write = true;
340 bool report_bad_unlock = false;
341 if (s->owner_tid == kInvalidTid) {
342 // Seems to be read unlock.
343 write = false;
344 thr->fast_state.IncrementEpoch();
345 TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
346 ReleaseImpl(thr, pc, &s->read_clock);
347 } else if (s->owner_tid == thr->tid) {
348 // Seems to be write unlock.
349 thr->fast_state.IncrementEpoch();
350 TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
351 CHECK_GT(s->recursion, 0);
352 s->recursion--;
353 if (s->recursion == 0) {
354 s->owner_tid = kInvalidTid;
355 ReleaseStoreImpl(thr, pc, &s->clock);
356 } else {
357 }
358 } else if (!s->IsFlagSet(MutexFlagBroken)) {
359 s->SetFlags(MutexFlagBroken);
360 report_bad_unlock = true;
361 }
362 thr->mset.Del(s->GetId(), write);
363 if (common_flags()->detect_deadlocks && s->recursion == 0) {
364 Callback cb(thr, pc);
365 ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
366 }
367 u64 mid = s->GetId();
368 s->mtx.Unlock();
369 // Can't touch s after this point.
370 if (report_bad_unlock)
371 ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
372 if (common_flags()->detect_deadlocks) {
373 Callback cb(thr, pc);
374 ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
375 }
376 }
377
MutexRepair(ThreadState * thr,uptr pc,uptr addr)378 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
379 DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
380 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
381 s->owner_tid = kInvalidTid;
382 s->recursion = 0;
383 s->mtx.Unlock();
384 }
385
MutexInvalidAccess(ThreadState * thr,uptr pc,uptr addr)386 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
387 DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
388 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
389 u64 mid = s->GetId();
390 s->mtx.Unlock();
391 ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
392 }
393
Acquire(ThreadState * thr,uptr pc,uptr addr)394 void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
395 DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
396 if (thr->ignore_sync)
397 return;
398 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
399 if (!s)
400 return;
401 AcquireImpl(thr, pc, &s->clock);
402 s->mtx.ReadUnlock();
403 }
404
// ThreadRegistry callback for AcquireGlobal: folds every thread's epoch into
// the acquiring thread's clock.
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  // For non-running threads use epoch1 (presumably the thread's final
  // epoch — confirm in the thread lifecycle code); for running threads use
  // the live epoch and let the peer know a global acquire happened.
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
    tctx->thr->clock.NoteGlobalAcquire(epoch);
  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
415
AcquireGlobal(ThreadState * thr,uptr pc)416 void AcquireGlobal(ThreadState *thr, uptr pc) {
417 DPrintf("#%d: AcquireGlobal\n", thr->tid);
418 if (thr->ignore_sync)
419 return;
420 ThreadRegistryLock l(ctx->thread_registry);
421 ctx->thread_registry->RunCallbackForEachThreadLocked(
422 UpdateClockCallback, thr);
423 }
424
ReleaseStoreAcquire(ThreadState * thr,uptr pc,uptr addr)425 void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
426 DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
427 if (thr->ignore_sync)
428 return;
429 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
430 thr->fast_state.IncrementEpoch();
431 // Can't increment epoch w/o writing to the trace as well.
432 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
433 ReleaseStoreAcquireImpl(thr, pc, &s->clock);
434 s->mtx.Unlock();
435 }
436
Release(ThreadState * thr,uptr pc,uptr addr)437 void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
438 DPrintf("#%d: Release %zx\n", thr->tid, addr);
439 if (thr->ignore_sync)
440 return;
441 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
442 thr->fast_state.IncrementEpoch();
443 // Can't increment epoch w/o writing to the trace as well.
444 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
445 ReleaseImpl(thr, pc, &s->clock);
446 s->mtx.Unlock();
447 }
448
ReleaseStore(ThreadState * thr,uptr pc,uptr addr)449 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
450 DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
451 if (thr->ignore_sync)
452 return;
453 SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
454 thr->fast_state.IncrementEpoch();
455 // Can't increment epoch w/o writing to the trace as well.
456 TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
457 ReleaseStoreImpl(thr, pc, &s->clock);
458 s->mtx.Unlock();
459 }
460
461 #if !SANITIZER_GO
// ThreadRegistry callback for AfterSleep: records every thread's epoch into
// thr->last_sleep_clock (live epoch for running threads, epoch1 —
// presumably the final epoch — for finished ones).
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
470
AfterSleep(ThreadState * thr,uptr pc)471 void AfterSleep(ThreadState *thr, uptr pc) {
472 DPrintf("#%d: AfterSleep %zx\n", thr->tid);
473 if (thr->ignore_sync)
474 return;
475 thr->last_sleep_stack_id = CurrentStackId(thr, pc);
476 ThreadRegistryLock l(ctx->thread_registry);
477 ctx->thread_registry->RunCallbackForEachThreadLocked(
478 UpdateSleepClockCallback, thr);
479 }
480 #endif
481
AcquireImpl(ThreadState * thr,uptr pc,SyncClock * c)482 void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
483 if (thr->ignore_sync)
484 return;
485 thr->clock.set(thr->fast_state.epoch());
486 thr->clock.acquire(&thr->proc()->clock_cache, c);
487 }
488
ReleaseStoreAcquireImpl(ThreadState * thr,uptr pc,SyncClock * c)489 void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
490 if (thr->ignore_sync)
491 return;
492 thr->clock.set(thr->fast_state.epoch());
493 thr->fast_synch_epoch = thr->fast_state.epoch();
494 thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
495 }
496
ReleaseImpl(ThreadState * thr,uptr pc,SyncClock * c)497 void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
498 if (thr->ignore_sync)
499 return;
500 thr->clock.set(thr->fast_state.epoch());
501 thr->fast_synch_epoch = thr->fast_state.epoch();
502 thr->clock.release(&thr->proc()->clock_cache, c);
503 }
504
ReleaseStoreImpl(ThreadState * thr,uptr pc,SyncClock * c)505 void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
506 if (thr->ignore_sync)
507 return;
508 thr->clock.set(thr->fast_state.epoch());
509 thr->fast_synch_epoch = thr->fast_state.epoch();
510 thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
511 }
512
AcquireReleaseImpl(ThreadState * thr,uptr pc,SyncClock * c)513 void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
514 if (thr->ignore_sync)
515 return;
516 thr->clock.set(thr->fast_state.epoch());
517 thr->fast_synch_epoch = thr->fast_state.epoch();
518 thr->clock.acq_rel(&thr->proc()->clock_cache, c);
519 }
520
// Turns a deadlock detector report r (a cycle of mutexes/threads) into a
// user-visible ThreadSanitizer report. r == 0 means "no deadlock found" and
// is a no-op.
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  // First pass: register every mutex and thread participating in the cycle.
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  // Second pass: attach the lock-site stacks (one or two per edge depending
  // on the second_deadlock_stack flag). stk values 0 / 0xffffffff mean the
  // stack could not be extracted.
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
546
547 } // namespace __tsan
548