1 // Copyright (c) 2015-2018 The Bitcoin Core developers
2 // Distributed under the MIT software license, see the accompanying
3 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5 #include <scheduler.h>
6
7 #include <random.h>
8 #include <reverselock.h>
9
10 #include <assert.h>
11 #include <utility>
12
CScheduler()13 CScheduler::CScheduler() : nThreadsServicingQueue(0), stopRequested(false), stopWhenEmpty(false)
14 {
15 }
16
// Destroy the scheduler. All serviceQueue() threads must have exited (see
// stop()) before destruction; a nonzero count here is a programming error.
CScheduler::~CScheduler()
{
    assert(nThreadsServicingQueue == 0);
}
21
22
23 #if BOOST_VERSION < 105000
toPosixTime(const boost::chrono::system_clock::time_point & t)24 static boost::system_time toPosixTime(const boost::chrono::system_clock::time_point& t)
25 {
26 // Creating the posix_time using from_time_t loses sub-second precision. So rather than exporting the time_point to time_t,
27 // start with a posix_time at the epoch (0) and add the milliseconds that have passed since then.
28 return boost::posix_time::from_time_t(0) + boost::posix_time::milliseconds(boost::chrono::duration_cast<boost::chrono::milliseconds>(t.time_since_epoch()).count());
29 }
30 #endif
31
// Thread entry point: drain the task queue, blocking until stop() is
// requested (or, with stop(true), until the queue empties). Multiple threads
// may run this concurrently; nThreadsServicingQueue counts how many are
// inside the loop (the destructor asserts it has reached zero).
void CScheduler::serviceQueue()
{
    boost::unique_lock<boost::mutex> lock(newTaskMutex);
    ++nThreadsServicingQueue;

    // newTaskMutex is locked throughout this loop EXCEPT
    // when the thread is waiting or when the user's function
    // is called.
    while (!shouldStop()) {
        try {
            if (!shouldStop() && taskQueue.empty()) {
                // Drop the lock while sleeping so other threads can
                // schedule or service tasks in the meantime.
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                // Use this chance to get more entropy
                RandAddSeedSleep();
            }
            while (!shouldStop() && taskQueue.empty()) {
                // Wait until there is something to do.
                newTaskScheduled.wait(lock);
            }

            // Wait until either there is a new task, or until
            // the time of the first item on the queue:

            // wait_until needs boost 1.50 or later; older versions have timed_wait:
#if BOOST_VERSION < 105000
            while (!shouldStop() && !taskQueue.empty() &&
                   newTaskScheduled.timed_wait(lock, toPosixTime(taskQueue.begin()->first))) {
                // Keep waiting until timeout
            }
#else
            // Some boost versions have a conflicting overload of wait_until that returns void.
            // Explicitly use a template here to avoid hitting that overload.
            while (!shouldStop() && !taskQueue.empty()) {
                boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
                if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
                    break; // Exit loop after timeout, it means we reached the time of the event
            }
#endif
            // If there are multiple threads, the queue can empty while we're waiting (another
            // thread may service the task we were waiting on).
            if (shouldStop() || taskQueue.empty())
                continue;

            // Pop the earliest task before releasing the lock so no other
            // thread can run it as well.
            Function f = taskQueue.begin()->second;
            taskQueue.erase(taskQueue.begin());

            {
                // Unlock before calling f, so it can reschedule itself or another task
                // without deadlocking:
                reverse_lock<boost::unique_lock<boost::mutex> > rlock(lock);
                f();
            }
        } catch (...) {
            // Keep the thread count accurate even if a task throws;
            // the exception propagates to the thread's owner.
            --nThreadsServicingQueue;
            throw;
        }
    }
    --nThreadsServicingQueue;
    // Wake another waiter so stop()/drain can complete on all threads.
    newTaskScheduled.notify_one();
}
92
stop(bool drain)93 void CScheduler::stop(bool drain)
94 {
95 {
96 boost::unique_lock<boost::mutex> lock(newTaskMutex);
97 if (drain)
98 stopWhenEmpty = true;
99 else
100 stopRequested = true;
101 }
102 newTaskScheduled.notify_all();
103 }
104
schedule(CScheduler::Function f,boost::chrono::system_clock::time_point t)105 void CScheduler::schedule(CScheduler::Function f, boost::chrono::system_clock::time_point t)
106 {
107 {
108 boost::unique_lock<boost::mutex> lock(newTaskMutex);
109 taskQueue.insert(std::make_pair(t, f));
110 }
111 newTaskScheduled.notify_one();
112 }
113
scheduleFromNow(CScheduler::Function f,int64_t deltaMilliSeconds)114 void CScheduler::scheduleFromNow(CScheduler::Function f, int64_t deltaMilliSeconds)
115 {
116 schedule(f, boost::chrono::system_clock::now() + boost::chrono::milliseconds(deltaMilliSeconds));
117 }
118
Repeat(CScheduler * s,CScheduler::Function f,int64_t deltaMilliSeconds)119 static void Repeat(CScheduler* s, CScheduler::Function f, int64_t deltaMilliSeconds)
120 {
121 f();
122 s->scheduleFromNow(std::bind(&Repeat, s, f, deltaMilliSeconds), deltaMilliSeconds);
123 }
124
scheduleEvery(CScheduler::Function f,int64_t deltaMilliSeconds)125 void CScheduler::scheduleEvery(CScheduler::Function f, int64_t deltaMilliSeconds)
126 {
127 scheduleFromNow(std::bind(&Repeat, this, f, deltaMilliSeconds), deltaMilliSeconds);
128 }
129
getQueueInfo(boost::chrono::system_clock::time_point & first,boost::chrono::system_clock::time_point & last) const130 size_t CScheduler::getQueueInfo(boost::chrono::system_clock::time_point &first,
131 boost::chrono::system_clock::time_point &last) const
132 {
133 boost::unique_lock<boost::mutex> lock(newTaskMutex);
134 size_t result = taskQueue.size();
135 if (!taskQueue.empty()) {
136 first = taskQueue.begin()->first;
137 last = taskQueue.rbegin()->first;
138 }
139 return result;
140 }
141
AreThreadsServicingQueue() const142 bool CScheduler::AreThreadsServicingQueue() const {
143 boost::unique_lock<boost::mutex> lock(newTaskMutex);
144 return nThreadsServicingQueue;
145 }
146
147
MaybeScheduleProcessQueue()148 void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() {
149 {
150 LOCK(m_cs_callbacks_pending);
151 // Try to avoid scheduling too many copies here, but if we
152 // accidentally have two ProcessQueue's scheduled at once its
153 // not a big deal.
154 if (m_are_callbacks_running) return;
155 if (m_callbacks_pending.empty()) return;
156 }
157 m_pscheduler->schedule(std::bind(&SingleThreadedSchedulerClient::ProcessQueue, this));
158 }
159
// Run at most one pending callback. The m_are_callbacks_running flag ensures
// callbacks execute one at a time and in queue order, even if several
// ProcessQueue invocations end up scheduled concurrently.
void SingleThreadedSchedulerClient::ProcessQueue() {
    std::function<void ()> callback;
    {
        LOCK(m_cs_callbacks_pending);
        if (m_are_callbacks_running) return;
        if (m_callbacks_pending.empty()) return;
        m_are_callbacks_running = true;

        // Dequeue under the lock; the callback itself runs unlocked below.
        callback = std::move(m_callbacks_pending.front());
        m_callbacks_pending.pop_front();
    }

    // RAII the setting of fCallbacksRunning and calling MaybeScheduleProcessQueue
    // to ensure both happen safely even if callback() throws.
    struct RAIICallbacksRunning {
        SingleThreadedSchedulerClient* instance;
        explicit RAIICallbacksRunning(SingleThreadedSchedulerClient* _instance) : instance(_instance) {}
        ~RAIICallbacksRunning() {
            {
                // Clear the running flag first so the rescheduled
                // ProcessQueue is not turned away by its own guard.
                LOCK(instance->m_cs_callbacks_pending);
                instance->m_are_callbacks_running = false;
            }
            instance->MaybeScheduleProcessQueue();
        }
    } raiicallbacksrunning(this);

    callback();
}
188
AddToProcessQueue(std::function<void ()> func)189 void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void ()> func) {
190 assert(m_pscheduler);
191
192 {
193 LOCK(m_cs_callbacks_pending);
194 m_callbacks_pending.emplace_back(std::move(func));
195 }
196 MaybeScheduleProcessQueue();
197 }
198
EmptyQueue()199 void SingleThreadedSchedulerClient::EmptyQueue() {
200 assert(!m_pscheduler->AreThreadsServicingQueue());
201 bool should_continue = true;
202 while (should_continue) {
203 ProcessQueue();
204 LOCK(m_cs_callbacks_pending);
205 should_continue = !m_callbacks_pending.empty();
206 }
207 }
208
CallbacksPending()209 size_t SingleThreadedSchedulerClient::CallbacksPending() {
210 LOCK(m_cs_callbacks_pending);
211 return m_callbacks_pending.size();
212 }
213