//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
11 
12 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
13 #include <sanitizer_common/sanitizer_stackdepot.h>
14 
15 #include "tsan_rtl.h"
16 #include "tsan_flags.h"
17 #include "tsan_sync.h"
18 #include "tsan_report.h"
19 #include "tsan_symbolize.h"
20 #include "tsan_platform.h"
21 
22 namespace __tsan {
23 
24 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
25 
// Adapter passed to the deadlock detector (ctx->dd) interface; carries the
// current thread and pc so the detector can request a stack trace or the
// thread id lazily, only when it actually needs them for a report.
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    // Hand the detector its per-processor and per-thread state slots.
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  // Captures the current stack into the stack depot and returns its id.
  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};
40 
DDMutexInit(ThreadState * thr,uptr pc,SyncVar * s)41 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
42   Callback cb(thr, pc);
43   ctx->dd->MutexInit(&cb, &s->dd);
44   s->dd.ctx = s->GetId();
45 }
46 
// Emits a mutex-misuse report of the given type (double lock, bad unlock,
// invalid access, ...) for the mutex identified by mid at address addr,
// attaching the current stack as the offending stack.
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  // Registry must be locked before constructing a report.
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
62 
// Processes mutex creation: imitates a write to the mutex memory (to catch
// races between initialization and other accesses) and records the creation
// flags and creation stack on the SyncVar.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    // is_freeing marks this synthetic access — presumably so a race on it
    // is reported specially; TODO confirm against the report code.
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Remember where the mutex was first created (C/C++ only).
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
78 
// Processes mutex destruction: notifies the deadlock detector, optionally
// reports destruction of a still-locked mutex, resets the SyncVar state and
// imitates a memory write to catch unlock-destroy races.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    // Re-initialize detector state so the address can host a new mutex.
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    // Still owned by some thread: report below, once per mutex.
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  // Snapshot everything the report needs before dropping the SyncVar lock.
  u64 mid = s->GetId();
  u32 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace);
    // Also attach the stack of the last lock of this mutex.
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    // The report is printed; now the SyncVar can be reset. Re-lookup
    // instead of reusing the stale pointer (it was unlocked above).
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
138 
// Called before the thread blocks acquiring the write lock at addr.
// For non-try locks, eagerly feeds the prospective lock-order edge to the
// deadlock detector so a deadlock is reported before the thread actually
// deadlocks.
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    // A recursive re-lock by the current owner cannot add a new edge.
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}
154 
// Called after a write lock at addr is acquired. rec is the number of
// recursion levels taken when MutexFlagRecursiveLock is set (otherwise one
// level is assumed). Detects double locks, performs the happens-before
// acquire, and updates the deadlock detector.
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  // Every epoch increment must be paired with a trace event.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Mutex was unowned: take ownership.
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    // Recursive lock by the current owner.
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    // Owned by another thread: double lock. MutexFlagBroken limits this to
    // one report per mutex.
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    // Synchronize with preceding writer and reader critical sections.
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    // If the caller could not run MutexPreLock, run the before-lock hook
    // here for non-try locks (MutexFlagDoPreLockOnPostLock).
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
209 
// Processes a write unlock of the mutex at addr. Returns the number of
// recursion levels released (0 if the unlock was invalid) — presumably so
// the caller can restore them on a later relock; confirm with callers.
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  // Every epoch increment must be paired with a trace event.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    // Unlock of an unlocked mutex, or unlock by a non-owner thread.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    // Release either one level or all levels of recursion at once.
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
252 
// Called before the thread blocks acquiring a read lock at addr.
// For non-try locks, eagerly feeds the prospective read-lock edge to the
// deadlock detector (read locks have no owner, so no owner check here,
// unlike MutexPreLock).
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
264 
// Called after a read lock at addr is acquired. Detects read-locking a
// write-locked mutex, performs the happens-before acquire from the write
// clock, and updates the deadlock detector.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  // Every epoch increment must be paired with a trace event.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    // Mutex is currently write-locked: read-lock is a bug.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  // Synchronize only with preceding writers (readers don't race with
  // readers, so s->read_clock is not acquired here).
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    // If the caller could not run MutexPreReadLock, run the before-lock
    // hook here for non-try locks.
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock  && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
304 
// Processes a read unlock of the mutex at addr. Releases into the separate
// read clock, which is acquired only by a subsequent write lock (see
// MutexPostLock) — readers don't synchronize with each other.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  // Every epoch increment must be paired with a trace event.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    // Mutex is write-locked: read-unlock is a bug.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
336 
// Unlock when the caller does not know whether a read or a write lock is
// being released (presumably annotation-based APIs — confirm with callers):
// infers the mode from the recorded owner tid.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    // Write-locked by another thread: bad unlock.
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
383 
MutexRepair(ThreadState * thr,uptr pc,uptr addr)384 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
385   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
386   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
387   s->owner_tid = SyncVar::kInvalidTid;
388   s->recursion = 0;
389   s->mtx.Unlock();
390 }
391 
MutexInvalidAccess(ThreadState * thr,uptr pc,uptr addr)392 void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
393   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
394   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
395   u64 mid = s->GetId();
396   s->mtx.Unlock();
397   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
398 }
399 
Acquire(ThreadState * thr,uptr pc,uptr addr)400 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
401   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
402   if (thr->ignore_sync)
403     return;
404   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
405   if (!s)
406     return;
407   AcquireImpl(thr, pc, &s->clock);
408   s->mtx.ReadUnlock();
409 }
410 
UpdateClockCallback(ThreadContextBase * tctx_base,void * arg)411 static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
412   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
413   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
414   u64 epoch = tctx->epoch1;
415   if (tctx->status == ThreadStatusRunning)
416     epoch = tctx->thr->fast_state.epoch();
417   thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
418 }
419 
// Acquires the current epoch of every registered thread into thr's clock,
// i.e. synchronizes with everything that has happened so far in all threads.
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
428 
// Release semantics on the sync object at addr (created if absent): merges
// the thread's vector clock into the object's clock.
void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
440 
// Release-store semantics on the sync object at addr (created if absent):
// stores the thread's clock into the object's clock rather than merging
// (per the SyncClock::ReleaseStore naming — see ReleaseStoreImpl).
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}
452 
453 #if !SANITIZER_GO
UpdateSleepClockCallback(ThreadContextBase * tctx_base,void * arg)454 static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
455   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
456   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
457   u64 epoch = tctx->epoch1;
458   if (tctx->status == ThreadStatusRunning)
459     epoch = tctx->thr->fast_state.epoch();
460   thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
461 }
462 
AfterSleep(ThreadState * thr,uptr pc)463 void AfterSleep(ThreadState *thr, uptr pc) {
464   DPrintf("#%d: AfterSleep %zx\n", thr->tid);
465   if (thr->ignore_sync)
466     return;
467   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
468   ThreadRegistryLock l(ctx->thread_registry);
469   ctx->thread_registry->RunCallbackForEachThreadLocked(
470       UpdateSleepClockCallback, thr);
471 }
472 #endif
473 
// Core acquire: joins the sync clock c into the thread's vector clock.
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  // Refresh the thread's own entry before merging.
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}
481 
// Core release: merges the thread's vector clock into the sync clock c.
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  // Record the epoch of this release — presumably consulted by a fast-path
  // elsewhere; TODO confirm how fast_synch_epoch is used.
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}
490 
// Core release-store: stores the thread's vector clock into the sync clock
// c (SyncClock::ReleaseStore, as opposed to the merging release()).
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  // Record the epoch of this release (see ReleaseImpl).
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}
499 
// Combined acquire+release on the sync clock c in a single operation
// (SyncClock acq_rel); counts as both an acquire and a release in stats.
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}
509 
// Converts a deadlock detector report (a cycle of r->n mutex/thread edges)
// into a tsan ReportTypeDeadlock report and prints it. No-op when r is null
// (the common case: GetReport returned nothing).
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    // One stack per edge, or two if second_deadlock_stack is enabled.
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
535 
536 }  // namespace __tsan
537