110d565efSmrg //===-- tsan_rtl_mutex.cc -------------------------------------------------===//
210d565efSmrg //
310d565efSmrg // This file is distributed under the University of Illinois Open Source
410d565efSmrg // License. See LICENSE.TXT for details.
510d565efSmrg //
610d565efSmrg //===----------------------------------------------------------------------===//
710d565efSmrg //
810d565efSmrg // This file is a part of ThreadSanitizer (TSan), a race detector.
910d565efSmrg //
1010d565efSmrg //===----------------------------------------------------------------------===//
1110d565efSmrg 
1210d565efSmrg #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
1310d565efSmrg #include <sanitizer_common/sanitizer_stackdepot.h>
1410d565efSmrg 
1510d565efSmrg #include "tsan_rtl.h"
1610d565efSmrg #include "tsan_flags.h"
1710d565efSmrg #include "tsan_sync.h"
1810d565efSmrg #include "tsan_report.h"
1910d565efSmrg #include "tsan_symbolize.h"
2010d565efSmrg #include "tsan_platform.h"
2110d565efSmrg 
2210d565efSmrg namespace __tsan {
2310d565efSmrg 
2410d565efSmrg void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
2510d565efSmrg 
// Adapter passed to the deadlock detector (DD) so it can call back into
// the TSan runtime: supplies the per-physical-thread (pt) and
// per-logical-thread (lt) DD state, stack unwinding, and thread identity.
struct Callback : DDCallback {
  ThreadState *thr;  // calling thread
  uptr pc;           // pc to attribute unwound stacks to

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  // Captures the current stack and returns its stack-depot id.
  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};
4010d565efSmrg 
// Initializes the deadlock-detector state of sync object s and stores the
// sync object's id in the DD context so deadlock reports can refer back
// to this mutex.
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}
4610d565efSmrg 
// Emits a mutex-misuse report of the given type (double lock, bad unlock,
// invalid access, ...) for the mutex identified by mid located at addr.
// Must be called without any sync-object mutexes held (report machinery
// takes its own locks).
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}
6210d565efSmrg 
// Registers creation of a mutex at addr; the MutexCreationFlagMask subset
// of flagz is recorded on the sync object.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  // Imitate a memory write so races between creation and other accesses
  // are caught. Skipped for linker-initialized mutexes, which have no
  // well-defined creation point.
  // NOTE(review): is_freeing presumably tags this synthetic access for
  // special handling in race reports — confirm in the reporting code.
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  // Remember where the mutex was created (first creation wins).
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}
7810d565efSmrg 
// Registers destruction of the mutex at addr. Reports
// ReportTypeMutexDestroyLocked if the mutex is destroyed while held
// (when report_destroy_locked is set) and resets the sync object.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    // NOTE(review): re-init immediately after destroy — presumably so the
    // DD state is clean if this SyncVar is reused; confirm against the
    // deadlock detector interface.
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);  // report each misused mutex only once
    unlock_locked = true;
  }
  // Capture everything needed for the report before dropping s->mtx.
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    // Second stack: where the mutex was last locked (restored from trace).
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    // The report is out; now the sync object can be reset. Re-lookup,
    // since s->mtx was released above (this shadows the outer s on
    // purpose: the old pointer must not be reused).
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
14010d565efSmrg 
// Called before a blocking write-lock acquisition. Only feeds the
// deadlock detector; the actual lock bookkeeping happens in
// MutexPostLock. Try-locks are skipped (they cannot deadlock).
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      // Drop s->mtx before reporting: reporting takes other locks.
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      // Recursive lock by the current owner: skip the deadlock detector.
      s->mtx.ReadUnlock();
    }
  }
}
156c7a68eb7Smrg 
// Called after a write lock is acquired. rec is the number of recursion
// levels acquired (only honored for MutexFlagRecursiveLock; otherwise 1).
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // First acquisition: take ownership.
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    // Recursive acquisition by the same thread.
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    // Lock while owned by another thread: double lock.
    s->SetFlags(MutexFlagBroken);  // report each mutex only once
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    // A writer synchronizes with both previous writers and previous
    // readers.
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    // If MutexPreLock was skipped, run the before-lock DD hook now
    // (MutexFlagDoPreLockOnPostLock), except for try-locks.
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
21110d565efSmrg 
// Handles a write unlock. Returns the number of recursion levels
// released (0 on a bad unlock).
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    // Unlock of a not-locked mutex, or unlock by a non-owner thread.
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);  // report each mutex only once
      report_bad_unlock = true;
    }
  } else {
    // MutexFlagRecursiveUnlock drops all recursion levels at once.
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}
25410d565efSmrg 
// Called before a blocking read-lock acquisition. Only feeds the
// deadlock detector; bookkeeping is done in MutexPostReadLock.
// Try-locks are skipped (they cannot deadlock).
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    // Drop s->mtx before reporting: reporting takes other locks.
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
266c7a68eb7Smrg 
// Called after a read lock is acquired.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  // Read lock while some thread holds the write lock is a misuse.
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);  // report each mutex only once
      report_bad_lock = true;
    }
  }
  // Readers synchronize only with prior writers (s->clock); read_clock is
  // not acquired here.
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    // If MutexPreReadLock was skipped, run the before-lock DD hook now
    // (MutexFlagDoPreLockOnPostLock), except for try-locks.
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock  && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
30610d565efSmrg 
// Handles a read unlock.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  // Read unlock while some thread owns the write lock is a misuse.
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);  // report each mutex only once
      report_bad_unlock = true;
    }
  }
  // Readers release into the separate read_clock; the next writer
  // acquires both clocks (see MutexPostLock).
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
33810d565efSmrg 
// Unlock when the caller does not know whether the mutex was held for
// reading or writing; the mode is inferred from the recorded owner_tid.
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    // Owned by another thread: bad unlock.
    s->SetFlags(MutexFlagBroken);  // report each mutex only once
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
38510d565efSmrg 
MutexRepair(ThreadState * thr,uptr pc,uptr addr)38610d565efSmrg void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
38710d565efSmrg   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
38810d565efSmrg   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
38910d565efSmrg   s->owner_tid = SyncVar::kInvalidTid;
39010d565efSmrg   s->recursion = 0;
39110d565efSmrg   s->mtx.Unlock();
39210d565efSmrg }
39310d565efSmrg 
MutexInvalidAccess(ThreadState * thr,uptr pc,uptr addr)39410d565efSmrg void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
39510d565efSmrg   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
39610d565efSmrg   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
39710d565efSmrg   u64 mid = s->GetId();
39810d565efSmrg   s->mtx.Unlock();
39910d565efSmrg   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
40010d565efSmrg }
40110d565efSmrg 
Acquire(ThreadState * thr,uptr pc,uptr addr)40210d565efSmrg void Acquire(ThreadState *thr, uptr pc, uptr addr) {
40310d565efSmrg   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
40410d565efSmrg   if (thr->ignore_sync)
40510d565efSmrg     return;
40610d565efSmrg   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
40710d565efSmrg   if (!s)
40810d565efSmrg     return;
40910d565efSmrg   AcquireImpl(thr, pc, &s->clock);
41010d565efSmrg   s->mtx.ReadUnlock();
41110d565efSmrg }
41210d565efSmrg 
UpdateClockCallback(ThreadContextBase * tctx_base,void * arg)41310d565efSmrg static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
41410d565efSmrg   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
41510d565efSmrg   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
416c7a68eb7Smrg   u64 epoch = tctx->epoch1;
41710d565efSmrg   if (tctx->status == ThreadStatusRunning)
418c7a68eb7Smrg     epoch = tctx->thr->fast_state.epoch();
419c7a68eb7Smrg   thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
42010d565efSmrg }
42110d565efSmrg 
// Synchronizes the calling thread with everything that has happened so
// far in all threads, by acquiring every thread's current epoch (see
// UpdateClockCallback).
void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}
43010d565efSmrg 
Release(ThreadState * thr,uptr pc,uptr addr)43110d565efSmrg void Release(ThreadState *thr, uptr pc, uptr addr) {
43210d565efSmrg   DPrintf("#%d: Release %zx\n", thr->tid, addr);
43310d565efSmrg   if (thr->ignore_sync)
43410d565efSmrg     return;
43510d565efSmrg   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
43610d565efSmrg   thr->fast_state.IncrementEpoch();
43710d565efSmrg   // Can't increment epoch w/o writing to the trace as well.
43810d565efSmrg   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
43910d565efSmrg   ReleaseImpl(thr, pc, &s->clock);
44010d565efSmrg   s->mtx.Unlock();
44110d565efSmrg }
44210d565efSmrg 
ReleaseStore(ThreadState * thr,uptr pc,uptr addr)44310d565efSmrg void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
44410d565efSmrg   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
44510d565efSmrg   if (thr->ignore_sync)
44610d565efSmrg     return;
44710d565efSmrg   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
44810d565efSmrg   thr->fast_state.IncrementEpoch();
44910d565efSmrg   // Can't increment epoch w/o writing to the trace as well.
45010d565efSmrg   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
45110d565efSmrg   ReleaseStoreImpl(thr, pc, &s->clock);
45210d565efSmrg   s->mtx.Unlock();
45310d565efSmrg }
45410d565efSmrg 
45510d565efSmrg #if !SANITIZER_GO
UpdateSleepClockCallback(ThreadContextBase * tctx_base,void * arg)45610d565efSmrg static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
45710d565efSmrg   ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
45810d565efSmrg   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
459c7a68eb7Smrg   u64 epoch = tctx->epoch1;
46010d565efSmrg   if (tctx->status == ThreadStatusRunning)
461c7a68eb7Smrg     epoch = tctx->thr->fast_state.epoch();
462c7a68eb7Smrg   thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
46310d565efSmrg }
46410d565efSmrg 
AfterSleep(ThreadState * thr,uptr pc)46510d565efSmrg void AfterSleep(ThreadState *thr, uptr pc) {
46610d565efSmrg   DPrintf("#%d: AfterSleep %zx\n", thr->tid);
46710d565efSmrg   if (thr->ignore_sync)
46810d565efSmrg     return;
46910d565efSmrg   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
47010d565efSmrg   ThreadRegistryLock l(ctx->thread_registry);
47110d565efSmrg   ctx->thread_registry->RunCallbackForEachThreadLocked(
47210d565efSmrg       UpdateSleepClockCallback, thr);
47310d565efSmrg }
47410d565efSmrg #endif
47510d565efSmrg 
AcquireImpl(ThreadState * thr,uptr pc,SyncClock * c)47610d565efSmrg void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
47710d565efSmrg   if (thr->ignore_sync)
47810d565efSmrg     return;
47910d565efSmrg   thr->clock.set(thr->fast_state.epoch());
48010d565efSmrg   thr->clock.acquire(&thr->proc()->clock_cache, c);
48110d565efSmrg   StatInc(thr, StatSyncAcquire);
48210d565efSmrg }
48310d565efSmrg 
ReleaseImpl(ThreadState * thr,uptr pc,SyncClock * c)48410d565efSmrg void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
48510d565efSmrg   if (thr->ignore_sync)
48610d565efSmrg     return;
48710d565efSmrg   thr->clock.set(thr->fast_state.epoch());
48810d565efSmrg   thr->fast_synch_epoch = thr->fast_state.epoch();
48910d565efSmrg   thr->clock.release(&thr->proc()->clock_cache, c);
49010d565efSmrg   StatInc(thr, StatSyncRelease);
49110d565efSmrg }
49210d565efSmrg 
ReleaseStoreImpl(ThreadState * thr,uptr pc,SyncClock * c)49310d565efSmrg void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
49410d565efSmrg   if (thr->ignore_sync)
49510d565efSmrg     return;
49610d565efSmrg   thr->clock.set(thr->fast_state.epoch());
49710d565efSmrg   thr->fast_synch_epoch = thr->fast_state.epoch();
49810d565efSmrg   thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
49910d565efSmrg   StatInc(thr, StatSyncRelease);
50010d565efSmrg }
50110d565efSmrg 
AcquireReleaseImpl(ThreadState * thr,uptr pc,SyncClock * c)50210d565efSmrg void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
50310d565efSmrg   if (thr->ignore_sync)
50410d565efSmrg     return;
50510d565efSmrg   thr->clock.set(thr->fast_state.epoch());
50610d565efSmrg   thr->fast_synch_epoch = thr->fast_state.epoch();
50710d565efSmrg   thr->clock.acq_rel(&thr->proc()->clock_cache, c);
50810d565efSmrg   StatInc(thr, StatSyncAcquire);
50910d565efSmrg   StatInc(thr, StatSyncRelease);
51010d565efSmrg }
51110d565efSmrg 
// Converts a deadlock report from the deadlock detector (a cycle of
// mutex/thread edges in r->loop) into a TSan report and prints it.
// r may be null (no deadlock detected), in which case this is a no-op.
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    // NOTE(review): stk[1] appears to be a secondary acquisition stack;
    // it is reported only when second_deadlock_stack is set — confirm
    // against the DD interface.
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}
53710d565efSmrg 
53810d565efSmrg }  // namespace __tsan
539