1 /*
2  * Copyright 2015 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "SkSharedMutex.h"
9 
10 #include "SkAtomics.h"
11 #include "SkTypes.h"
12 #include "SkSemaphore.h"
13 
14 #if !defined(__has_feature)
15     #define __has_feature(x) 0
16 #endif
17 
18 #if __has_feature(thread_sanitizer)
19 
20     /* Report that a lock has been created at address "lock". */
21     #define ANNOTATE_RWLOCK_CREATE(lock) \
22         AnnotateRWLockCreate(__FILE__, __LINE__, lock)
23 
24     /* Report that the lock at address "lock" is about to be destroyed. */
25     #define ANNOTATE_RWLOCK_DESTROY(lock) \
26         AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
27 
28     /* Report that the lock at address "lock" has been acquired.
29        is_w=1 for writer lock, is_w=0 for reader lock. */
30     #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
31         AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
32 
33     /* Report that the lock at address "lock" is about to be released. */
34     #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
35       AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
36 
37     #if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
38         #if defined(__GNUC__)
39             #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
40         #else
41             /* TODO(glider): for Windows support we may want to change this macro in order
42                to prepend __declspec(selectany) to the annotations' declarations. */
43             #error weak annotations are not supported for your compiler
44         #endif
45     #else
46         #define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
47     #endif
48 
49 #ifdef __GNUC__
50 #pragma GCC visibility push(default)
51 #endif
52 
53     extern "C" {
54     void AnnotateRWLockCreate(
55         const char *file, int line,
56         const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
57     void AnnotateRWLockDestroy(
58         const char *file, int line,
59         const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
60     void AnnotateRWLockAcquired(
61         const char *file, int line,
62         const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
63     void AnnotateRWLockReleased(
64         const char *file, int line,
65         const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
66     }
67 
68 #ifdef __GNUC__
69 #pragma GCC visibility pop
70 #endif
71 
72 #else
73 
74     #define ANNOTATE_RWLOCK_CREATE(lock)
75     #define ANNOTATE_RWLOCK_DESTROY(lock)
76     #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)
77     #define ANNOTATE_RWLOCK_RELEASED(lock, is_w)
78 
79 #endif
80 
81 #ifdef SK_DEBUG
82 
83     #include "SkThreadID.h"
84     #include "SkTDArray.h"
85 
86     class SkSharedMutex::ThreadIDSet {
87     public:
88         // Returns true if threadID is in the set.
find(SkThreadID threadID) const89         bool find(SkThreadID threadID) const {
90             for (auto& t : fThreadIDs) {
91                 if (t == threadID) return true;
92             }
93             return false;
94         }
95 
96         // Returns true if did not already exist.
tryAdd(SkThreadID threadID)97         bool tryAdd(SkThreadID threadID) {
98             for (auto& t : fThreadIDs) {
99                 if (t == threadID) return false;
100             }
101             fThreadIDs.append(1, &threadID);
102             return true;
103         }
104         // Returns true if already exists in Set.
tryRemove(SkThreadID threadID)105         bool tryRemove(SkThreadID threadID) {
106             for (int i = 0; i < fThreadIDs.count(); ++i) {
107                 if (fThreadIDs[i] == threadID) {
108                     fThreadIDs.remove(i);
109                     return true;
110                 }
111             }
112             return false;
113         }
114 
swap(ThreadIDSet & other)115         void swap(ThreadIDSet& other) {
116             fThreadIDs.swap(other.fThreadIDs);
117         }
118 
count() const119         int count() const {
120             return fThreadIDs.count();
121         }
122 
123     private:
124         SkTDArray<SkThreadID> fThreadIDs;
125     };
126 
SkSharedMutex()127     SkSharedMutex::SkSharedMutex()
128         : fCurrentShared(new ThreadIDSet)
129         , fWaitingExclusive(new ThreadIDSet)
130         , fWaitingShared(new ThreadIDSet){
131         ANNOTATE_RWLOCK_CREATE(this);
132     }
133 
~SkSharedMutex()134     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
135 
    // Acquire the lock exclusively (writer lock).
    // Registers this thread as an exclusive waiter under fMu, then blocks on
    // fExclusiveQueue until the current shared holders and any earlier
    // exclusive holder have released.
    void SkSharedMutex::acquire() {
        SkThreadID threadID(SkGetThreadID());
        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);

            // Recursive exclusive locking is not supported.
            if (!fWaitingExclusive->tryAdd(threadID)) {
                SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID);
            }

            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

        // waitingExclusiveCount includes this thread (it was just added), so
        // > 1 means another writer is ahead of us. If readers are running or a
        // writer is ahead, wait to be signaled by release()/releaseShared().
        if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
            fExclusiveQueue.wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
157 
    // Implementation Detail:
    // The shared threads need two separate queues to keep the threads that were added after the
    // exclusive lock separate from the threads added before.
    // Release the exclusive (writer) lock.
    // Wakes the whole batch of shared waiters that queued up while this writer
    // held the lock; if there are none, wakes a single waiting writer instead.
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);
        SkThreadID threadID(SkGetThreadID());
        int sharedWaitingCount;
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            // No shared holders may be running while an exclusive lock is held.
            SkASSERT(0 == fCurrentShared->count());
            if (!fWaitingExclusive->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID);
            }
            exclusiveWaitingCount = fWaitingExclusive->count();
            sharedWaitingCount = fWaitingShared->count();
            // The readers that waited during this exclusive hold become the
            // current shared set; fWaitingShared is left empty (fCurrentShared
            // was asserted empty above) for new arrivals.
            fWaitingShared.swap(fCurrentShared);
            sharedQueueSelect = fSharedQueueSelect;
            // Flip to the other shared queue so readers that start waiting
            // after this point do not consume the signals meant for this batch.
            if (sharedWaitingCount > 0) {
                fSharedQueueSelect = 1 - fSharedQueueSelect;
            }
        }

        if (sharedWaitingCount > 0) {
            // Release the entire batch of shared waiters at once.
            fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
        } else if (exclusiveWaitingCount > 0) {
            // No shared waiters; hand the lock to one waiting writer.
            fExclusiveQueue.signal();
        }
    }
188 
assertHeld() const189     void SkSharedMutex::assertHeld() const {
190         SkThreadID threadID(SkGetThreadID());
191         SkAutoMutexAcquire l(&fMu);
192         SkASSERT(0 == fCurrentShared->count());
193         SkASSERT(fWaitingExclusive->find(threadID));
194     }
195 
    // Acquire the lock shared (reader lock).
    // If any writer is waiting, this reader queues on the currently selected
    // shared queue and runs after that writer; otherwise it runs immediately.
    void SkSharedMutex::acquireShared() {
        SkThreadID threadID(SkGetThreadID());
        int exclusiveWaitingCount;
        int sharedQueueSelect;
        {
            SkAutoMutexAcquire l(&fMu);
            exclusiveWaitingCount = fWaitingExclusive->count();
            if (exclusiveWaitingCount > 0) {
                // Writers have priority: queue this reader to run after the
                // writer releases.
                if (!fWaitingShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID);
                }
            } else {
                // No writers pending; run immediately as a shared holder.
                if (!fCurrentShared->tryAdd(threadID)) {
                    SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID);
                }
            }
            // Record which of the two shared queues to wait on; release()
            // flips the selector when it wakes a batch.
            sharedQueueSelect = fSharedQueueSelect;
        }

        if (exclusiveWaitingCount > 0) {
            fSharedQueue[sharedQueueSelect].wait();
        }

        ANNOTATE_RWLOCK_ACQUIRED(this, 0);
    }
221 
    // Release a shared (reader) lock.
    // The last reader out signals one waiting writer, if any.
    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);
        SkThreadID threadID(SkGetThreadID());

        int currentSharedCount;
        int waitingExclusiveCount;
        {
            SkAutoMutexAcquire l(&fMu);
            if (!fCurrentShared->tryRemove(threadID)) {
                SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID);
            }
            currentSharedCount = fCurrentShared->count();
            waitingExclusiveCount = fWaitingExclusive->count();
        }

        // If this was the last running reader and a writer is waiting, wake it.
        if (0 == currentSharedCount && waitingExclusiveCount > 0) {
            fExclusiveQueue.signal();
        }
    }
241 
assertHeldShared() const242     void SkSharedMutex::assertHeldShared() const {
243         SkThreadID threadID(SkGetThreadID());
244         SkAutoMutexAcquire l(&fMu);
245         SkASSERT(fCurrentShared->find(threadID));
246     }
247 
248 #else
249 
    // The fQueueCounts field holds three counts packed into a single int32_t so they can be
    // managed atomically. The three counts must be the same size, so each gets 10 bits; 10 bits
    // allow a maximum count of 2^10 = 1024 threads.
    //
    // The three counts held in fQueueCounts are:
    // * Shared - the number of shared lock holders currently running.
    // * WaitingExclusive - the number of threads waiting for an exclusive lock.
    // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
    //   to finish.
    // Each of the three packed counts in fQueueCounts occupies this many bits.
    static const int kLogThreadCount = 10;

    enum {
        // Bit offsets of the three 10-bit counts within fQueueCounts.
        // NOTE(review): "Exlusive" is a typo for "Exclusive", but the name is
        // used throughout this file, so it is kept unchanged for consistency.
        kSharedOffset          = (0 * kLogThreadCount),
        kWaitingExlusiveOffset = (1 * kLogThreadCount),
        kWaitingSharedOffset   = (2 * kLogThreadCount),
        // Masks isolating each 10-bit count in place.
        kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
        kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
        kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
    };
269 
SkSharedMutex()270     SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
~SkSharedMutex()271     SkSharedMutex::~SkSharedMutex() {  ANNOTATE_RWLOCK_DESTROY(this); }
    // Acquire the lock exclusively (writer lock) using the packed atomic
    // counts. Adds this thread to the exclusive-waiter count, then waits on
    // fExclusiveQueue if any reader is running or another writer is ahead.
    void SkSharedMutex::acquire() {
        // Increment the count of exclusive queue waiters.
        int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                                        sk_memory_order_acquire);

        // If there are no other exclusive waiters and no shared threads are running then run
        // else wait. (oldQueueCounts is the value before our increment, so any
        // non-zero exclusive bits mean another writer was already queued.)
        if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
            fExclusiveQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 1);
    }
284 
    // Release the exclusive (writer) lock.
    // In one CAS loop: removes this writer from the counts and promotes all
    // waiting readers to running; then signals the readers as a batch, or one
    // writer if no readers were waiting.
    void SkSharedMutex::release() {
        ANNOTATE_RWLOCK_RELEASED(this, 1);

        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t waitingShared;
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;

            // Decrement exclusive waiters.
            newQueueCounts -= 1 << kWaitingExlusiveOffset;

            // The number of threads waiting to acquire a shared lock.
            waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

            // If there are any move the counts of all the shared waiters to actual shared. They are
            // going to run next.
            if (waitingShared > 0) {

                // Set waiting shared to zero.
                newQueueCounts &= ~kWaitingSharedMask;

                // Because this is the exclusive release, then there are zero readers. So, the bits
                // for shared locks should be zero. Since those bits are zero, we can just |= in the
                // waitingShared count instead of clearing with an &= and then |= the count.
                newQueueCounts |= waitingShared << kSharedOffset;
            }

        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_release, sk_memory_order_relaxed));

        if (waitingShared > 0) {
            // Run all the shared.
            fSharedQueue.signal(waitingShared);
        } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            // Run a single exclusive waiter.
            fExclusiveQueue.signal();
        }
    }
324 
    // Acquire the lock shared (reader lock) using the packed atomic counts.
    // Readers queue behind any waiting writer; otherwise they run at once.
    void SkSharedMutex::acquireShared() {
        int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
        int32_t newQueueCounts;
        do {
            newQueueCounts = oldQueueCounts;
            // If there are waiting exclusives then this shared lock waits else it runs.
            if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
                newQueueCounts += 1 << kWaitingSharedOffset;
            } else {
                newQueueCounts += 1 << kSharedOffset;
            }
        } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                                sk_memory_order_acquire, sk_memory_order_relaxed));

        // If there are waiting exclusives, then this shared waits until after it runs.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            fSharedQueue.wait();
        }
        ANNOTATE_RWLOCK_ACQUIRED(this, 0);

    }
346 
    // Release a shared (reader) lock.
    // The last reader out (old shared count == 1) wakes one waiting writer.
    void SkSharedMutex::releaseShared() {
        ANNOTATE_RWLOCK_RELEASED(this, 0);

        // Decrement the shared count.
        int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
                                                        sk_memory_order_release);

        // If shared count is going to zero (because the old count == 1) and there are exclusive
        // waiters, then run a single exclusive waiter.
        if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
            && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
            fExclusiveQueue.signal();
        }
    }
361 
362 #endif
363