1 #include "support/Threading.h"
2 #include "support/Trace.h"
3 #include "llvm/ADT/ScopeExit.h"
4 #include "llvm/Support/FormatVariadic.h"
5 #include "llvm/Support/Threading.h"
6 #include <atomic>
7 #include <thread>
8 #ifdef __USE_POSIX
9 #include <pthread.h>
10 #elif defined(__APPLE__)
11 #include <sys/resource.h>
12 #elif defined(_WIN32)
13 #include <windows.h>
14 #endif
15 
16 namespace clang {
17 namespace clangd {
18 
notify()19 void Notification::notify() {
20   {
21     std::lock_guard<std::mutex> Lock(Mu);
22     Notified = true;
23     // Broadcast with the lock held. This ensures that it's safe to destroy
24     // a Notification after wait() returns, even from another thread.
25     CV.notify_all();
26   }
27 }
28 
wait() const29 void Notification::wait() const {
30   std::unique_lock<std::mutex> Lock(Mu);
31   CV.wait(Lock, [this] { return Notified; });
32 }
33 
Semaphore(std::size_t MaxLocks)34 Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}
35 
try_lock()36 bool Semaphore::try_lock() {
37   std::unique_lock<std::mutex> Lock(Mutex);
38   if (FreeSlots > 0) {
39     --FreeSlots;
40     return true;
41   }
42   return false;
43 }
44 
lock()45 void Semaphore::lock() {
46   trace::Span Span("WaitForFreeSemaphoreSlot");
47   // trace::Span can also acquire locks in ctor and dtor, we make sure it
48   // happens when Semaphore's own lock is not held.
49   {
50     std::unique_lock<std::mutex> Lock(Mutex);
51     SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
52     --FreeSlots;
53   }
54 }
55 
unlock()56 void Semaphore::unlock() {
57   std::unique_lock<std::mutex> Lock(Mutex);
58   ++FreeSlots;
59   Lock.unlock();
60 
61   SlotsChanged.notify_one();
62 }
63 
~AsyncTaskRunner()64 AsyncTaskRunner::~AsyncTaskRunner() { wait(); }
65 
wait(Deadline D) const66 bool AsyncTaskRunner::wait(Deadline D) const {
67   std::unique_lock<std::mutex> Lock(Mutex);
68   return clangd::wait(Lock, TasksReachedZero, D,
69                       [&] { return InFlightTasks == 0; });
70 }
71 
// Schedule Action to run on a fresh thread, naming the thread Name.
// The task is tracked in InFlightTasks so wait() can rendezvous with it.
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  // Count the task before the thread starts, so a concurrent wait() already
  // observes it as in-flight.
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  // Decrements the counter when destroyed; it is moved into the task lambda
  // below, so it fires on the worker thread once the task finishes.
  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure function stored by ThreadFunc is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::llvm_execute_on_thread_async(std::move(Task),
                                     /*clang::DesiredStackSize*/ 8 << 20);
}
101 
timeoutSeconds(llvm::Optional<double> Seconds)102 Deadline timeoutSeconds(llvm::Optional<double> Seconds) {
103   using namespace std::chrono;
104   if (!Seconds)
105     return Deadline::infinity();
106   return steady_clock::now() +
107          duration_cast<steady_clock::duration>(duration<double>(*Seconds));
108 }
109 
wait(std::unique_lock<std::mutex> & Lock,std::condition_variable & CV,Deadline D)110 void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
111           Deadline D) {
112   if (D == Deadline::zero())
113     return;
114   if (D == Deadline::infinity())
115     return CV.wait(Lock);
116   CV.wait_until(Lock, D.time());
117 }
118 
operator ()()119 bool PeriodicThrottler::operator()() {
120   Rep Now = Stopwatch::now().time_since_epoch().count();
121   Rep OldNext = Next.load(std::memory_order_acquire);
122   if (Now < OldNext)
123     return false;
124   // We're ready to run (but may be racing other threads).
125   // Work out the updated target time, and run if we successfully bump it.
126   Rep NewNext = Now + Period;
127   return Next.compare_exchange_strong(OldNext, NewNext,
128                                       std::memory_order_acq_rel);
129 }
130 
131 } // namespace clangd
132 } // namespace clang
133