//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

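// A simple test-and-set spin lock over a single atomic byte. The all-zero
// state means "unlocked", so a zero-initialized global instance is usable
// without running a constructor; Init() is for explicit (re)initialization.
// The slow path spins with proc_yield() (a CPU pause hint) for the first
// ten iterations, then yields to the scheduler via internal_sched_yield().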
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

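// Variant of StaticSpinMutex that initializes itself from its constructor;
// intended for dynamically created mutexes rather than linker-initialized
// globals. Copying is disabled via the private, unimplemented copy members.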
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

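// A mutex that blocks in the kernel instead of spinning. The implementation
// is platform-specific and kept behind opaque_storage_ (on Linux it is
// typically built on futexes). The constexpr LinkerInitialized constructor
// lets global instances be used without any runtime initialization.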
class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

// Reader-writer spin mutex.
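// State encoding: bit 0 (kWriteLock) is set while a writer holds the lock;
// each reader adds kReadLock (2), so the reader count occupies the higher
// bits. Writers spin in LockSlow() until the state returns to kUnlocked;
// readers optimistically bump the count and then wait in ReadLockSlow()
// until the writer bit clears.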
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};

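// RAII helper that acquires the given mutex in its constructor and releases
// it in its destructor.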
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

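// RAII helper for the reader side: takes ReadLock() in the constructor and
// ReadUnlock() in the destructor.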
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
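
// Usage sketch (illustrative only; `mu` and `Work` are hypothetical names):
//
//   static StaticSpinMutex mu;  // zero-initialized, safe as a global
//
//   void Work() {
//     SpinMutexLock l(&mu);  // locked here, unlocked when l leaves scope
//     // ... critical section ...
//   }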

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H
218