//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

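// Adapts TSan's per-thread state to the DDCallback interface of the
// generic deadlock detector in sanitizer_common.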
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

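// Registers the sync object with the deadlock detector and remembers the
// mutex id, so that deadlock reports can refer back to this mutex.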
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

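// Prints a mutex-misuse report (double lock, bad unlock, etc.) for the
// mutex identified by mid, with the current stack attached.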
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

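// Records mutex creation. Unless the mutex is linker-initialized, a write
// to its memory is imitated so that races between initialization and other
// accesses to the mutex memory are detected.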
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

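// Handles mutex destruction: optionally reports destruction of a locked
// mutex, then resets the sync object state. Destroy is a no-op for
// linker-initialized (static) mutexes.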
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

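// Called before a blocking write-lock attempt. The deadlock detector is
// notified up front so that a lock-order inversion is reported even if
// the acquisition is about to block; try-locks are skipped.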
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

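// Called once a write lock has been acquired (rec is the recursion count
// for re-locks). Updates ownership and recursion, acquires both the write
// and read release clocks of the mutex, and reports double locks.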
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

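// Handles write unlock and returns the released recursion count, which
// the caller can later pass back to MutexPostLock to restore recursion
// (used, e.g., around condition-variable waits).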
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

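// Read-lock counterpart of MutexPreLock: notifies the deadlock detector
// before a blocking read-lock attempt.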
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

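// Called once a read lock has been acquired. Readers acquire only the
// write-release clock; they do not synchronize with each other. Also
// reports read-locking of a write-held mutex.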
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

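// Handles read unlock: releases into the separate read clock, which a
// subsequent write lock acquires from.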
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

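// Used when the caller does not know whether the lock is held for reading
// or writing (e.g. rwlock unlock interceptors); the mode is inferred from
// the recorded owner: no owner means it was a read lock.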
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

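// Forcibly releases the mutex state, e.g. to let interceptors recover a
// mutex left in an inconsistent state (such as a dead owner).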
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

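// Reports an operation on an invalid (e.g. uninitialized or destroyed)
// mutex object.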
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

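// Plain acquire: establishes a happens-before edge from all releases on
// the sync object at addr, if one exists.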
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

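// Folds one thread's current epoch (or its final epoch, if the thread has
// finished) into the acquiring thread's vector clock.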
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

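// Acquires from all threads at once: everything done by any thread up to
// this point happens-before the current thread's subsequent actions.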
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

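// Plain release: makes the current thread's clock visible to subsequent
// acquires of the sync object at addr, combining with prior releases.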
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

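// Store-release: overwrites the sync object's clock with the current
// thread's clock instead of combining with it.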
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
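// Records every thread's current epoch in the sleeping thread's
// last_sleep_clock; reports use it to flag races that may only be
// "synchronized" by the timing of a sleep.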
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

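// The *Impl functions below operate directly on vector clocks and are
// shared by the mutex operations above and by other synchronization
// interceptors.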
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

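// Converts a lock cycle found by the deadlock detector into a TSan report
// listing the mutexes, threads, and (when available) lock stacks involved.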
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan