1 /*
2  * Copyright (c) Facebook, Inc. and its affiliates.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #pragma once
18 
19 #include <algorithm>
20 #include <condition_variable>
21 #include <functional>
22 #include <map>
23 #include <random>
24 #include <thread>
25 #include <vector>
26 
27 #include <glog/logging.h>
28 
29 #include <folly/Random.h>
30 #include <folly/Synchronized.h>
31 #include <folly/container/Foreach.h>
32 #include <folly/portability/GTest.h>
33 
34 namespace folly {
35 namespace sync_tests {
36 
randomSleep(std::chrono::milliseconds min,std::chrono::milliseconds max)37 void randomSleep(std::chrono::milliseconds min, std::chrono::milliseconds max) {
38   std::uniform_int_distribution<> range(min.count(), max.count());
39   folly::ThreadLocalPRNG prng;
40   std::chrono::milliseconds duration(range(prng));
41   /* sleep override */
42   std::this_thread::sleep_for(duration);
43 }
44 
45 /*
 * Run a function simultaneously in a number of different threads.
47  *
48  * The function will be passed the index number of the thread it is running in.
49  * This function makes an attempt to synchronize the start of the threads as
50  * best as possible.  It waits for all threads to be allocated and started
51  * before invoking the function.
52  */
template <class Function>
void runParallel(size_t numThreads, const Function& function) {
  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  // Variables used to synchronize all threads to try and start them
  // as close to the same time as possible
  folly::Synchronized<size_t, std::mutex> threadsReady(0);
  std::condition_variable readyCV;
  folly::Synchronized<bool, std::mutex> go(false);
  std::condition_variable goCV;

  auto worker = [&](size_t threadIndex) {
    // Signal that we are ready
    ++(*threadsReady.lock());
    readyCV.notify_one();

    // Wait until we are given the signal to start
    // The purpose of this is to try and make sure all threads start
    // as close to the same time as possible.
    {
      auto lockedGo = go.lock();
      // as_lock() exposes the underlying std::unique_lock so the
      // condition_variable can release/re-acquire it while waiting.
      goCV.wait(lockedGo.as_lock(), [&] { return *lockedGo; });
    }

    function(threadIndex);
  };

  // Start all of the threads
  for (size_t threadIndex = 0; threadIndex < numThreads; ++threadIndex) {
    threads.emplace_back([threadIndex, &worker]() { worker(threadIndex); });
  }

  // Wait for all threads to become ready
  {
    auto readyLocked = threadsReady.lock();
    readyCV.wait(
        readyLocked.as_lock(), [&] { return *readyLocked == numThreads; });
  }
  // Now signal the threads that they can go
  // (assignment through Synchronized acquires the mutex internally, so the
  // flag update is published under the lock before the notification below)
  go = true;
  goCV.notify_all();

  // Wait for all threads to finish
  for (auto& thread : threads) {
    thread.join();
  }
}
101 
102 // testBasic() version for shared lock types
// Exercises the shared-mutex API of folly::Synchronized: wlock()/rlock(),
// copy construction from a locked value, scopedUnlock(), and
// contextualLock() on a const reference.
template <class Mutex>
std::enable_if_t<folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.wlock()->resize(1000);

  // Copy-construct a second Synchronized from the locked contents of obj;
  // obj2 is fully independent of obj afterwards.
  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.wlock()};
  EXPECT_EQ(1000, obj2.rlock()->size());

  {
    auto lockedObj = obj.wlock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    // Locking obj2 while holding obj's write lock must not deadlock: the
    // two objects have separate mutexes.
    EXPECT_EQ(1000, obj2.wlock()->size());
    EXPECT_EQ(1000, obj2.rlock()->size());

    {
      // scopedUnlock() temporarily releases the write lock, so re-locking
      // obj inside this scope must not deadlock; the lock is re-acquired
      // when unlocker goes out of scope.
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  {
    auto lockedObj = obj.rlock();
    EXPECT_EQ(1001, lockedObj->size());
    // Taking a second read lock while one is held is allowed for shared
    // mutex types.
    EXPECT_EQ(1001, obj.rlock()->size());
    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  obj.wlock()->front() = 2;

  {
    // contextualLock() on a const reference should grab a shared lock
    auto lockedObj = constObj.contextualLock();
    EXPECT_EQ(2, lockedObj->front());
    EXPECT_EQ(2, constObj.rlock()->front());
    EXPECT_EQ(2, obj.rlock()->front());
  }

  // Final state checks: obj holds 1001 elements, obj2 is untouched.
  EXPECT_EQ(1001, obj.rlock()->size());
  EXPECT_EQ(2, obj.rlock()->front());
  EXPECT_EQ(10, obj.rlock()->back());
  EXPECT_EQ(1000, obj2.rlock()->size());
}
153 
154 // testBasic() version for non-shared lock types
// Exercises the exclusive-mutex API of folly::Synchronized: lock() (both
// mutable and const), copy construction from a locked value, scopedUnlock(),
// and contextualLock().
template <class Mutex>
std::enable_if_t<!folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.lock()->resize(1000);

  // Copy-construct a second Synchronized from the locked contents of obj;
  // obj2 is fully independent of obj afterwards.
  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.lock()};
  EXPECT_EQ(1000, obj2.lock()->size());

  {
    auto lockedObj = obj.lock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    // Locking obj2 while holding obj's lock must not deadlock: the two
    // objects have separate mutexes.
    EXPECT_EQ(1000, obj2.lock()->size());

    {
      // scopedUnlock() temporarily releases the lock, so re-locking obj
      // inside this scope must not deadlock; the lock is re-acquired when
      // unlocker goes out of scope.
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.lock()->size());
    }
  }
  {
    // lock() on a const reference yields const access to the data.
    auto lockedObj = constObj.lock();
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.lock()->size());
  }

  obj.lock()->front() = 2;

  // Final state checks: obj holds 1001 elements, obj2 is untouched.
  EXPECT_EQ(1001, obj.lock()->size());
  EXPECT_EQ(2, obj.lock()->front());
  EXPECT_EQ(2, obj.contextualLock()->front());
  EXPECT_EQ(10, obj.lock()->back());
  EXPECT_EQ(1000, obj2.lock()->size());
}
193 
// Entry point for the basic API tests. Dispatches at compile time (via the
// enable_if on testBasicImpl) to the shared or exclusive implementation,
// depending on whether Mutex supports shared locking.
template <class Mutex>
void testBasic() {
  testBasicImpl<Mutex>();
}
198 
199 // testWithLock() version for shared lock types
// Exercises the callback-style locking APIs for shared lock types:
// withWLock()/withRLock() (which pass a reference to the data) and
// withWLockPtr()/withRLockPtr() (which pass the LockedPtr itself, allowing
// scopedUnlock()), including the const-object overloads and generic lambdas.
template <class Mutex>
std::enable_if_t<folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  // Test withWLock() and withRLock()
  obj.withWLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });
  // On a const object, withWLock() only provides const access to the data.
  constObj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  obj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  constObj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

  // Repeat the checks with generic lambdas where the compiler supports them.
#if __cpp_generic_lambdas >= 201304
  obj.withWLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withWLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  constObj.withWLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  obj.withRLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  constObj.withRLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withWLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withWLockPtr() and withRLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withWLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  constObj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  obj.withWLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      // The LockedPtr form supports scopedUnlock(): release the write lock,
      // re-lock obj independently, then re-acquire on scope exit.
      auto unlocker = lockedObj.scopedUnlock();
      obj.wlock()->push_back(15);
    }
    EXPECT_EQ(15, lockedObj->back());
  });
  constObj.withWLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  // Without generic lambdas, push the same values (13, 14, 15) so the
  // element counts below hold either way.
  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif

  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  constObj.withWLockPtr([](typename SynchType::ConstWLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
  obj.withRLockPtr([](typename SynchType::RLockedPtr&& lockedObj) {
    // Read LockedPtrs must expose the data as const.
    EXPECT_TRUE(
        (std::is_const<std::remove_reference_t<decltype(*lockedObj)>>{}));
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
  constObj.withRLockPtr([](typename SynchType::ConstRLockedPtr&& lockedObj) {
    EXPECT_TRUE(
        (std::is_const<std::remove_reference_t<decltype(*lockedObj)>>{}));
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}
297 
298 // testWithLock() version for non-shared lock types
// Exercises the callback-style locking APIs for exclusive lock types:
// withLock() (which passes a reference to the data) and withLockPtr()
// (which passes the LockedPtr itself, allowing scopedUnlock()).
template <class Mutex>
std::enable_if_t<!folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  // Test withLock()
  obj.withLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

  // Repeat with generic lambdas where the compiler supports them.
#if __cpp_generic_lambdas >= 201304
  obj.withLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  obj.withLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      // The LockedPtr form supports scopedUnlock(): release the lock,
      // re-lock obj independently, then re-acquire on scope exit.
      auto unlocker = lockedObj.scopedUnlock();
      obj.lock()->push_back(15);
    }
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  // Without generic lambdas, push the same values (13, 14, 15) so the
  // element counts below hold either way.
  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif

  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  const auto& constObj = obj;
  constObj.withLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}
356 
// Tests unlock() and isNull() on LockedPtrs obtained via contextualLock()/
// contextualRLock(), plus re-assignment of an unlocked LockedPtr. Shared by
// both testUnlock() variants below.
template <class Mutex>
void testUnlockCommon() {
  folly::Synchronized<int, Mutex> value{7};
  const auto& cv = value;

  {
    auto lv = value.contextualLock();
    EXPECT_EQ(7, *lv);
    *lv = 5;
    // After unlock() the LockedPtr is empty: isNull() is true and it
    // converts to false.
    lv.unlock();
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);

    // contextualLock() on a const reference yields read access.
    auto rlv = cv.contextualLock();
    EXPECT_EQ(5, *rlv);
    rlv.unlock();
    EXPECT_TRUE(rlv.isNull());
    EXPECT_FALSE(rlv);

    auto rlv2 = cv.contextualRLock();
    EXPECT_EQ(5, *rlv2);
    rlv2.unlock();

    // An unlocked LockedPtr can be re-assigned from a fresh lock.
    lv = value.contextualLock();
    EXPECT_EQ(5, *lv);
    *lv = 9;
  }

  EXPECT_EQ(9, *value.contextualRLock());
}
387 
388 // testUnlock() version for shared lock types
// Verifies unlock(), isNull(), and move-assignment of wlock()/rlock()
// LockedPtrs for shared lock types.
template <class Mutex>
std::enable_if_t<folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testUnlock() {
  folly::Synchronized<int, Mutex> value{10};
  {
    auto lv = value.wlock();
    EXPECT_EQ(10, *lv);
    *lv = 5;
    // After unlock() the LockedPtr is empty.
    lv.unlock();
    EXPECT_FALSE(lv);
    EXPECT_TRUE(lv.isNull());

    auto rlv = value.rlock();
    EXPECT_EQ(5, *rlv);
    rlv.unlock();
    EXPECT_FALSE(rlv);
    EXPECT_TRUE(rlv.isNull());

    auto lv2 = value.wlock();
    EXPECT_EQ(5, *lv2);
    *lv2 = 7;

    // Move-assignment transfers lock ownership, leaving the source null.
    lv = std::move(lv2);
    EXPECT_FALSE(lv2);
    EXPECT_TRUE(lv2.isNull());
    EXPECT_FALSE(lv.isNull());
    EXPECT_EQ(7, *lv);
  }

  testUnlockCommon<Mutex>();
}
420 
421 // testUnlock() version for non-shared lock types
// Verifies unlock(), isNull(), and move-assignment of lock() LockedPtrs
// for exclusive lock types.
template <class Mutex>
std::enable_if_t<!folly::detail::kSynchronizedMutexIsShared<void, Mutex>>
testUnlock() {
  folly::Synchronized<int, Mutex> value{10};
  {
    auto lv = value.lock();
    EXPECT_EQ(10, *lv);
    *lv = 5;
    // After unlock() the LockedPtr is empty.
    lv.unlock();
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);

    auto lv2 = value.lock();
    EXPECT_EQ(5, *lv2);
    *lv2 = 6;
    lv2.unlock();
    EXPECT_TRUE(lv2.isNull());
    EXPECT_FALSE(lv2);

    // An unlocked LockedPtr can be re-assigned from a fresh lock.
    lv = value.lock();
    EXPECT_EQ(6, *lv);
    *lv = 7;

    // Move-assignment transfers lock ownership, leaving the source null.
    lv2 = std::move(lv);
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);
    EXPECT_FALSE(lv2.isNull());
    EXPECT_EQ(7, *lv2);
  }

  testUnlockCommon<Mutex>();
}
454 
455 // Testing the deprecated SYNCHRONIZED and SYNCHRONIZED_CONST APIs
template <class Mutex>
[[deprecated]] void testDeprecated() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  obj.contextualLock()->resize(1000);

  // Synchronized is copyable; the copy gets its own mutex and its own copy
  // of the data.
  auto obj2 = obj;
  EXPECT_EQ(1000, obj2.contextualLock()->size());

  // Inside SYNCHRONIZED(obj) the name `obj` is rebound to the locked data,
  // shadowing the Synchronized object itself.
  SYNCHRONIZED(obj) {
    obj.push_back(10);
    EXPECT_EQ(1001, obj.size());
    EXPECT_EQ(10, obj.back());
    EXPECT_EQ(1000, obj2.contextualLock()->size());
  }

  SYNCHRONIZED_CONST(obj) { EXPECT_EQ(1001, obj.size()); }

  // Two-argument form: bind the locked data to a separate name (lockedObj).
  SYNCHRONIZED(lockedObj, *&obj) { lockedObj.front() = 2; }

  EXPECT_EQ(1001, obj.contextualLock()->size());
  EXPECT_EQ(10, obj.contextualLock()->back());
  EXPECT_EQ(1000, obj2.contextualLock()->size());

  // Sanity-check the helper macro used by the SYNCHRONIZED implementation.
  EXPECT_EQ(FB_ARG_2_OR_1(1, 2), 2);
  EXPECT_EQ(FB_ARG_2_OR_1(1), 1);
}
483 
// Hammers a Synchronized<vector> from many threads concurrently, then
// verifies that every pushed value is present exactly once (i.e. no lost
// updates under contention).
template <class Mutex>
void testConcurrency() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  static const size_t numThreads = 100;
  // Note: I initially tried using itersPerThread = 1000,
  // which works fine for most lock types, but std::shared_timed_mutex
  // appears to be extraordinarily slow.  It could take around 30 seconds
  // to run this test with 1000 iterations per thread using shared_timed_mutex.
  static const size_t itersPerThread = 100;

  auto pushNumbers = [&](size_t threadIdx) {
    // Test lock()
    for (size_t n = 0; n < itersPerThread; ++n) {
      // Each thread pushes a disjoint range of values.
      v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
      std::this_thread::yield();
    }
  };
  runParallel(numThreads, pushNumbers);

  std::vector<int> result;
  v.swap(result);

  // After sorting, the contents must be exactly
  // 0 .. numThreads * itersPerThread - 1.
  EXPECT_EQ(numThreads * itersPerThread, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < itersPerThread * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
513 
// Tests acquireLocked(): half the threads lock (v, m) and the other half
// lock (m, v). acquireLocked() must avoid deadlock even though the objects
// are passed in opposite orders.
template <class Mutex>
void testAcquireLocked() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    // Note: this will be less awkward with C++ 17's structured
    // binding functionality, which will make it easier to use the returned
    // std::tuple.
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, m);
      std::get<0>(ret)->push_back(threadIdx);
      (*std::get<1>(ret))[threadIdx] = threadIdx + 1;
    } else {
      auto ret = acquireLocked(m, v);
      std::get<1>(ret)->push_back(threadIdx);
      (*std::get<0>(ret))[threadIdx] = threadIdx + 1;
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  // Every thread must have appended its index exactly once.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
546 
// Like testAcquireLocked(), but passes one of the objects through a const
// reference to verify acquireLocked() works with mixed const/non-const
// arguments (the const one only gets read access).
template <class Mutex>
void testAcquireLockedWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    const auto& cm = m;
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, cm);
      (void)std::get<1>(ret)->size();
      std::get<0>(ret)->push_back(threadIdx);
    } else {
      auto ret = acquireLocked(cm, v);
      (void)std::get<0>(ret)->size();
      std::get<1>(ret)->push_back(threadIdx);
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  // Every thread must have appended its index exactly once.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
577 
578 // Testing the deprecated SYNCHRONIZED_DUAL API
template <class Mutex>
[[deprecated]] void testDualLocking() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    // Half the threads name (v, m) and the other half (m, v);
    // SYNCHRONIZED_DUAL must avoid deadlock regardless of argument order.
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, m) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    } else {
      SYNCHRONIZED_DUAL(lm, m, lv, v) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  // Every thread must have appended its index exactly once.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
610 
611 // Testing the deprecated SYNCHRONIZED_DUAL API
template <class Mutex>
[[deprecated]] void testDualLockingWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    // The map is accessed through a const reference, so SYNCHRONIZED_DUAL
    // only gets read access to it.
    const auto& cm = m;
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, cm) {
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    } else {
      SYNCHRONIZED_DUAL(lm, cm, lv, v) {
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  // Every thread must have appended its index exactly once.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
644 
// Tests contextualLock() with timeouts: each thread retries until it
// acquires the lock, counting timeouts, while random sleeps held under the
// lock make timeouts likely in other threads.
template <class Mutex>
void testTimed() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts{0};

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.contextualLock()->push_back(2 * threadIdx);

    // Test using lock with a timeout
    for (;;) {
      auto lv = v.contextualLock(std::chrono::milliseconds(5));
      // A timed-out acquisition yields a null LockedPtr; count it and retry.
      if (!lv) {
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep for a random time to ensure we trigger timeouts
      // in other threads
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      lv->push_back(2 * threadIdx + 1);
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Each thread pushed two values, so the sorted result must be exactly
  // 0 .. 2 * numThreads - 1.
  EXPECT_EQ(2 * numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < 2 * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a large number of number timeouts here.
  // I'm not adding a check for it since it's theoretically possible that
  // we might get 0 timeouts depending on the CPU scheduling if our threads
  // don't get to run very often.
  LOG(INFO) << "testTimed: " << *numTimeouts.contextualRLock() << " timeouts";

  // Make sure we can lock with various timeout duration units
  {
    auto lv = v.contextualLock(std::chrono::milliseconds(5));
    EXPECT_TRUE(bool(lv));
    EXPECT_FALSE(lv.isNull());
    auto lv2 = v.contextualLock(std::chrono::microseconds(5));
    // We may or may not acquire lv2 successfully, depending on whether
    // or not this is a recursive mutex type.
  }
  {
    auto lv = v.contextualLock(std::chrono::seconds(1));
    EXPECT_TRUE(bool(lv));
  }
}
702 
// Tests rlock() with a timeout on shared lock types: threads retry the read
// lock until it succeeds, counting timeouts, then verify their own write
// (made via wlock() earlier) is visible.
template <class Mutex>
void testTimedShared() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts{0};

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.wlock()->push_back(threadIdx);

    // Test lock() with a timeout
    for (;;) {
      auto lv = v.rlock(std::chrono::milliseconds(10));
      // A timed-out acquisition yields a null LockedPtr; count it and retry.
      if (!lv) {
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep while holding the lock.
      //
      // This will block other threads from acquiring the write lock to add
      // their thread index to v, but it won't block threads that have entered
      // the for loop and are trying to acquire a read lock.
      //
      // For lock types that give preference to readers rather than writers,
      // this will tend to serialize all threads on the wlock() above.
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      auto found = std::find(lv->begin(), lv->end(), threadIdx);
      CHECK(found != lv->end());
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Every thread must have appended its index exactly once.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a small number of timeouts here.
  // For locks that give readers preference over writers this should usually
  // be 0.  With locks that give writers preference we do see a small-ish
  // number of read timeouts.
  LOG(INFO) << "testTimedShared: " << *numTimeouts.contextualRLock()
            << " timeouts";
}
754 
755 template <class Mutex>
testConstCopy()756 void testConstCopy() {
757   std::vector<int> input = {1, 2, 3};
758   const folly::Synchronized<std::vector<int>, Mutex> v(input);
759 
760   std::vector<int> result;
761 
762   v.copyInto(result);
763   EXPECT_EQ(input, result);
764 
765   result = v.copy();
766   EXPECT_EQ(input, result);
767 }
768 
// A type that is neither copyable nor movable and has no default
// constructor.  Used by testInPlaceConstruction() below to verify that
// folly::Synchronized can hold such types via in-place construction.
struct NotCopiableNotMovable {
  NotCopiableNotMovable(int, const char*) {}
  NotCopiableNotMovable(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable& operator=(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable(NotCopiableNotMovable&&) = delete;
  NotCopiableNotMovable& operator=(NotCopiableNotMovable&&) = delete;
};
776 
// Compile-time test: folly::in_place forwards the constructor arguments, so
// Synchronized can hold a type that is neither copyable nor movable.
// The Mutex template parameter is intentionally unused here.
template <class Mutex>
void testInPlaceConstruction() {
  // This won't compile without in_place
  folly::Synchronized<NotCopiableNotMovable> a(folly::in_place, 5, "a");
}
782 
783 template <class Mutex>
testExchange()784 void testExchange() {
785   std::vector<int> input = {1, 2, 3};
786   folly::Synchronized<std::vector<int>, Mutex> v(input);
787   std::vector<int> next = {4, 5, 6};
788   auto prev = v.exchange(std::move(next));
789   EXPECT_EQ((std::vector<int>{{1, 2, 3}}), prev);
790   EXPECT_EQ((std::vector<int>{{4, 5, 6}}), v.copy());
791 }
792 } // namespace sync_tests
793 } // namespace folly
794