1 // libTorrent - BitTorrent library
2 // Copyright (C) 2005-2011, Jari Sundell
3 //
4 // This program is free software; you can redistribute it and/or modify
5 // it under the terms of the GNU General Public License as published by
6 // the Free Software Foundation; either version 2 of the License, or
7 // (at your option) any later version.
8 //
9 // This program is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 // GNU General Public License for more details.
13 //
14 // You should have received a copy of the GNU General Public License
15 // along with this program; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 //
18 // In addition, as a special exception, the copyright holders give
19 // permission to link the code of portions of this program with the
20 // OpenSSL library under certain conditions as described in each
21 // individual source file, and distribute linked combinations
22 // including the two.
23 //
24 // You must obey the GNU General Public License in all respects for
25 // all of the code used other than OpenSSL. If you modify file(s)
26 // with this exception, you may extend this exception to your version
27 // of the file(s), but you are not obligated to do so. If you do not
28 // wish to do so, delete this exception statement from your version.
29 // If you delete this exception statement from all source files in the
30 // program, then also delete it here.
31 //
32 // Contact: Jari Sundell <jaris@ifi.uio.no>
33 //
34 // Skomakerveien 33
35 // 3185 Skoppum, NORWAY
36
37 #include "config.h"
38
39 #include "exceptions.h"
40 #include "download_info.h"
41 #include "tracker.h"
42 #include "tracker_controller.h"
43 #include "tracker_list.h"
44
45 #include "rak/priority_queue_default.h"
46 #include "utils/log.h"
47
48 #include "globals.h"
49
// Log a tracker-controller message tagged with this torrent's info.
// Wrapped in do { } while (0) so the expansion is a single statement:
// the previous version ended in a semicolon, which produced an empty
// statement at every call site and would break an unbraced
// if/LT_LOG_TRACKER/else sequence.
#define LT_LOG_TRACKER(log_level, log_fmt, ...)                                \
  do {                                                                         \
    lt_log_print_info(LOG_TRACKER_##log_level, m_tracker_list->info(),         \
                      "tracker_controller", log_fmt, __VA_ARGS__);             \
  } while (0)
52
53 namespace torrent {
54
// Private timer state for TrackerController, kept out of the public
// header so the rak::priority_queue details stay internal to this
// translation unit.
struct tracker_controller_private {
  // Fires TrackerController::do_timeout(): drives announce scheduling.
  rak::priority_item task_timeout;
  // Fires TrackerController::do_scrape(): drives scheduled scrapes.
  rak::priority_item task_scrape;
};
59
60 // End temp hacks...
61
62 void
update_timeout(uint32_t seconds_to_next)63 TrackerController::update_timeout(uint32_t seconds_to_next) {
64 if (!(m_flags & flag_active))
65 throw internal_error("TrackerController cannot set timeout when inactive.");
66
67 rak::timer next_timeout = cachedTime;
68
69 if (seconds_to_next != 0)
70 next_timeout = (cachedTime + rak::timer::from_seconds(seconds_to_next)).round_seconds();
71
72 priority_queue_erase(&taskScheduler, &m_private->task_timeout);
73 priority_queue_insert(&taskScheduler, &m_private->task_timeout, next_timeout);
74 }
75
76 inline int
current_send_state() const77 TrackerController::current_send_state() const {
78 switch ((m_flags & mask_send)) {
79 case flag_send_start: return Tracker::EVENT_STARTED;
80 case flag_send_stop: return Tracker::EVENT_STOPPED;
81 case flag_send_completed: return Tracker::EVENT_COMPLETED;
82 case flag_send_update:
83 default: return Tracker::EVENT_NONE;
84 }
85 }
86
TrackerController(TrackerList * trackers)87 TrackerController::TrackerController(TrackerList* trackers) :
88 m_flags(0),
89 m_tracker_list(trackers),
90 m_private(new tracker_controller_private) {
91
92 m_private->task_timeout.slot() = std::bind(&TrackerController::do_timeout, this);
93 m_private->task_scrape.slot() = std::bind(&TrackerController::do_scrape, this);
94 }
95
TrackerController::~TrackerController() {
  // Remove both tasks from the global scheduler before the private
  // state they point into is destroyed.
  priority_queue_erase(&taskScheduler, &m_private->task_timeout);
  priority_queue_erase(&taskScheduler, &m_private->task_scrape);
  delete m_private;
}
101
// Expose the internal timeout task.
rak::priority_item*
TrackerController::task_timeout() {
  return &m_private->task_timeout;
}
106
// Expose the internal scrape task.
rak::priority_item*
TrackerController::task_scrape() {
  return &m_private->task_scrape;
}
111
// Absolute time (usec) at which the timeout task is scheduled.
int64_t
TrackerController::next_timeout() const {
  return m_private->task_timeout.time().usec();
}
116
// Absolute time (usec) at which the scrape task is scheduled.
int64_t
TrackerController::next_scrape() const {
  return m_private->task_scrape.time().usec();
}
121
// Whole seconds (rounded up) until the timeout task fires; 0 when
// the deadline has already passed.
uint32_t
TrackerController::seconds_to_next_timeout() const {
  return std::max(m_private->task_timeout.time() - cachedTime, rak::timer()).seconds_ceiling();
}
126
// Whole seconds (rounded up) until the scrape task fires; 0 when the
// deadline has already passed.
uint32_t
TrackerController::seconds_to_next_scrape() const {
  return std::max(m_private->task_scrape.time() - cachedTime, rak::timer()).seconds_ceiling();
}
131
// Trigger an announce in response to a user request. Only acts when
// the timeout task is currently queued, i.e. the controller is
// active and idle. NOTE(review): 'request_now' is currently unused —
// every manual request goes through send_update_event(); confirm
// whether an immediate-request path was intended.
void
TrackerController::manual_request(bool request_now) {
  if (!m_private->task_timeout.is_queued())
    return;

  // Add functions to get the lowest timeout, etc...
  send_update_event();
}
140
141 void
scrape_request(uint32_t seconds_to_request)142 TrackerController::scrape_request(uint32_t seconds_to_request) {
143 rak::timer next_timeout = cachedTime;
144
145 if (seconds_to_request != 0)
146 next_timeout = (cachedTime + rak::timer::from_seconds(seconds_to_request)).round_seconds();
147
148 priority_queue_erase(&taskScheduler, &m_private->task_scrape);
149 priority_queue_insert(&taskScheduler, &m_private->task_scrape, next_timeout);
150 }
151
152 // The send_*_event() functions tries to ensure the relevant trackers
153 // receive the event.
154 //
155 // When we just want more peers the start_requesting() function is
156 // used. This is all independent of the regular updates sent to the
157 // trackers.
158
// Queue or send the 'started' event. The event is lazy: when the
// controller is inactive (or no tracker is usable) only the intent
// is recorded in m_flags, and enable() will send it later.
void
TrackerController::send_start_event() {
  if (m_flags & flag_send_start) {
    // A start event is already pending; we currently just fall
    // through and queue/send it again.
  }

  m_flags &= ~mask_send;
  m_flags |= flag_send_start;

  if (!(m_flags & flag_active) || !m_tracker_list->has_usable()) {
    LT_LOG_TRACKER(INFO, "Queueing started event.", 0);
    return;
  }

  LT_LOG_TRACKER(INFO, "Sending started event.", 0);

  // Restart announcing from the first usable tracker. With more than
  // one usable tracker, go promiscuous with a short (3s) timeout so
  // we quickly fall back when the first tracker does not respond.
  close();
  m_tracker_list->send_state_itr(m_tracker_list->find_usable(m_tracker_list->begin()), Tracker::EVENT_STARTED);

  if (m_tracker_list->count_usable() > 1) {
    m_flags |= flag_promiscuous_mode;
    update_timeout(3);
  }
}
198
// Send the 'stopped' event to every tracker currently in use.
// Unlike start/completed, stop is not queued for later: when the
// controller is inactive or no tracker is usable it is skipped.
void
TrackerController::send_stop_event() {
  if (m_flags & flag_send_stop) {
    // A stop event is already pending; fall through and resend.
  }

  m_flags &= ~mask_send;

  if (!(m_flags & flag_active) || !m_tracker_list->has_usable()) {
    LT_LOG_TRACKER(INFO, "Skipping stopped event as no tracker need it.", 0);
    return;
  }

  m_flags |= flag_send_stop;

  LT_LOG_TRACKER(INFO, "Sending stopped event.", 0);

  close();

  // Only trackers we actually announced to need to be told we stop.
  for (TrackerList::iterator itr = m_tracker_list->begin(); itr != m_tracker_list->end(); itr++) {
    if (!(*itr)->is_in_use())
      continue;

    m_tracker_list->send_state(*itr, Tracker::EVENT_STOPPED);
  }

  // Timer...
}
228
// Queue or send the 'completed' event. Like 'started' it is lazy:
// when inactive (or no usable tracker) only the flag is recorded and
// the event goes out once the controller is enabled.
void
TrackerController::send_completed_event() {
  if (m_flags & flag_send_completed) {
    // A completed event is already pending; fall through and resend.
  }

  m_flags &= ~mask_send;
  m_flags |= flag_send_completed;

  if (!(m_flags & flag_active) || !m_tracker_list->has_usable()) {
    LT_LOG_TRACKER(INFO, "Queueing completed event.", 0);
    return;
  }

  LT_LOG_TRACKER(INFO, "Sending completed event.", 0);

  // Send to all trackers that have been announced to.
  close();

  for (TrackerList::iterator itr = m_tracker_list->begin(); itr != m_tracker_list->end(); itr++) {
    if (!(*itr)->is_in_use())
      continue;

    m_tracker_list->send_state(*itr, Tracker::EVENT_COMPLETED);
  }

  // Timer...
}
259
// Send a plain (EVENT_NONE) announce to the first usable tracker.
// Does nothing while inactive, and does not interrupt a pending
// start/stop/completed event that already has a request in flight.
void
TrackerController::send_update_event() {
  if (!(m_flags & flag_active) || !m_tracker_list->has_usable())
    return;

  if ((m_flags & mask_send) && m_tracker_list->has_active())
    return;

  // NOTE: a queued start/stop/completed flag is left untouched here
  // while EVENT_NONE is sent below, so a pending state can
  // effectively be lost ("we can lose a state here").
  if (!(m_flags & mask_send))
    m_flags |= flag_send_update;

  LT_LOG_TRACKER(INFO, "Sending update event.", 0);

  m_tracker_list->send_state_itr(m_tracker_list->find_usable(m_tracker_list->begin()), Tracker::EVENT_NONE);

  // if (m_tracker_list->has_active())
  //   priority_queue_erase(&taskScheduler, &m_private->task_timeout);
}
279
// Currently being used by send_state, fixme.
//
// Stop all tracker requests and clear requesting/promiscuous state.
// 'flags' may ask for in-flight stop/completed requests to be
// disowned (left to finish detached) instead of closed outright.
void
TrackerController::close(int flags) {
  m_flags &= ~(flag_requesting | flag_promiscuous_mode);

  if ((flags & (close_disown_stop | close_disown_completed)))
    m_tracker_list->disown_all_including(close_disown_stop | close_disown_completed);

  m_tracker_list->close_all();
  priority_queue_erase(&taskScheduler, &m_private->task_timeout);
}
291
292 void
enable(int enable_flags)293 TrackerController::enable(int enable_flags) {
294 if ((m_flags & flag_active))
295 return;
296
297 // Clearing send stop here in case we cycle disable/enable too
298 // fast. In the future do this based on flags passed.
299 m_flags |= flag_active;
300 m_flags &= ~flag_send_stop;
301
302 m_tracker_list->close_all_excluding((1 << Tracker::EVENT_COMPLETED));
303
304 if (!(enable_flags & enable_dont_reset_stats))
305 m_tracker_list->clear_stats();
306
307 LT_LOG_TRACKER(INFO, "Called enable with %u trackers.", m_tracker_list->size());
308
309 // Adding of the tracker requests gets done after the caller has had
310 // a chance to override the default behavior.
311 update_timeout(0);
312 }
313
314 void
disable()315 TrackerController::disable() {
316 if (!(m_flags & flag_active))
317 return;
318
319 // Disable other flags?...
320 m_flags &= ~(flag_active | flag_requesting | flag_promiscuous_mode);
321
322 m_tracker_list->close_all_excluding((1 << Tracker::EVENT_STOPPED) | (1 << Tracker::EVENT_COMPLETED));
323 priority_queue_erase(&taskScheduler, &m_private->task_timeout);
324
325 LT_LOG_TRACKER(INFO, "Called disable with %u trackers.", m_tracker_list->size());
326 }
327
// Enter aggressive peer-requesting mode. While the controller is
// active this forces an immediate rescheduling of the announce
// timeout. A no-op when already requesting.
void
TrackerController::start_requesting() {
  if ((m_flags & flag_requesting))
    return;

  m_flags |= flag_requesting;

  if ((m_flags & flag_active))
    update_timeout(0);

  LT_LOG_TRACKER(INFO, "Start requesting.", 0);
}
340
// Leave aggressive peer-requesting mode. The current timeout is left
// as scheduled. A no-op when not requesting.
void
TrackerController::stop_requesting() {
  if (!(m_flags & flag_requesting))
    return;

  m_flags &= ~flag_requesting;

  LT_LOG_TRACKER(INFO, "Stop requesting.", 0);
}
350
// Seconds until 'tracker' should next be contacted, given the
// controller's flags. Returns ~0 for a tracker that must not be
// contacted at all: busy with a non-scrape request, or not usable.
uint32_t
tracker_next_timeout(Tracker* tracker, int controller_flags) {
  if ((controller_flags & TrackerController::flag_requesting))
    return tracker_next_timeout_promiscuous(tracker);

  if ((tracker->is_busy() && tracker->latest_event() != Tracker::EVENT_SCRAPE) ||
      !tracker->is_usable())
    return ~uint32_t();

  if ((controller_flags & TrackerController::flag_promiscuous_mode))
    return 0;

  if ((controller_flags & TrackerController::flag_send_update))
    return tracker_next_timeout_update(tracker);

  // if (tracker->success_counter() == 0 && tracker->failed_counter() == 0)
  //   return 0;

  int32_t last_activity = cachedTime.seconds() - tracker->activity_time_last();

  // TODO: Use min interval if we're requesting manual update.

  // Remaining portion of the normal interval, clamped at zero.
  return tracker->normal_interval() - std::min(last_activity, (int32_t)tracker->normal_interval());
}
375
376 uint32_t
tracker_next_timeout_update(Tracker * tracker)377 tracker_next_timeout_update(Tracker* tracker) {
378 if ((tracker->is_busy() && tracker->latest_event() != Tracker::EVENT_SCRAPE) ||
379 !tracker->is_usable())
380 return ~uint32_t();
381
382 // Make sure we don't request _too_ often, check last activity.
383 // int32_t last_activity = cachedTime.seconds() - tracker->activity_time_last();
384
385 return 0;
386 }
387
388 uint32_t
tracker_next_timeout_promiscuous(Tracker * tracker)389 tracker_next_timeout_promiscuous(Tracker* tracker) {
390 if ((tracker->is_busy() && tracker->latest_event() != Tracker::EVENT_SCRAPE) ||
391 !tracker->is_usable())
392 return ~uint32_t();
393
394 int32_t interval;
395
396 if (tracker->failed_counter())
397 interval = 5 << std::min<int>(tracker->failed_counter() - 1, 6);
398 else
399 interval = tracker->normal_interval();
400
401 int32_t min_interval = std::max(tracker->min_interval(), (uint32_t)300);
402 int32_t use_interval = std::min(interval, min_interval);
403
404 int32_t since_last = cachedTime.seconds() - (int32_t)tracker->activity_time_last();
405
406 return std::max(use_interval - since_last, 0);
407 }
408
409 TrackerList::iterator
tracker_find_preferred(TrackerList::iterator first,TrackerList::iterator last,uint32_t * next_timeout)410 tracker_find_preferred(TrackerList::iterator first, TrackerList::iterator last, uint32_t* next_timeout) {
411 TrackerList::iterator preferred = last;
412 uint32_t preferred_time_last = ~uint32_t();
413
414 for (; first != last; first++) {
415 uint32_t tracker_timeout = tracker_next_timeout_promiscuous(*first);
416
417 if (tracker_timeout != 0) {
418 *next_timeout = std::min(tracker_timeout, *next_timeout);
419 continue;
420 }
421
422 if ((*first)->activity_time_last() < preferred_time_last) {
423 preferred = first;
424 preferred_time_last = (*first)->activity_time_last();
425 }
426 }
427
428 return preferred;
429 }
430
// Main announce scheduler, invoked by the task_timeout timer. In
// promiscuous/requesting mode it walks the tracker groups and
// announces to the preferred tracker of each idle group; otherwise it
// sends to the single tracker next due. Either way the next timeout
// is rescheduled as needed and m_slot_timeout is notified.
void
TrackerController::do_timeout() {
  if (!(m_flags & flag_active) || !m_tracker_list->has_usable())
    return;

  priority_queue_erase(&taskScheduler, &m_private->task_timeout);

  int send_state = current_send_state();

  if ((m_flags & (flag_promiscuous_mode | flag_requesting))) {
    // Smallest remaining wait seen across all groups; ~0 = none.
    uint32_t next_timeout = ~uint32_t();

    TrackerList::iterator itr = m_tracker_list->begin();

    while (itr != m_tracker_list->end()) {
      uint32_t group = (*itr)->group();

      // Skip groups that already have a non-scrape request running.
      if (m_tracker_list->has_active_not_scrape_in_group(group)) {
        itr = m_tracker_list->end_group(group);
        continue;
      }

      TrackerList::iterator group_end = m_tracker_list->end_group((*itr)->group());
      TrackerList::iterator preferred = itr;

      if (!(*itr)->is_usable() || (*itr)->failed_counter()) {
        // The selected tracker in the group is either disabled or not
        // reachable, try the others to find a new one to use.
        preferred = tracker_find_preferred(preferred, group_end, &next_timeout);

      } else {
        uint32_t tracker_timeout = tracker_next_timeout_promiscuous(*preferred);

        // Group leader not due yet: record its wait, send nothing.
        if (tracker_timeout != 0) {
          next_timeout = std::min(tracker_timeout, next_timeout);
          preferred = group_end;
        }
      }

      if (preferred != group_end)
        m_tracker_list->send_state_itr(preferred, send_state);

      itr = group_end;
    }

    if (next_timeout != ~uint32_t())
      update_timeout(next_timeout);

    // TODO: Send for start/completed also?
  } else {
    // Normal mode: only the single next-due tracker is considered.
    TrackerList::iterator itr = m_tracker_list->find_next_to_request(m_tracker_list->begin());

    if (itr == m_tracker_list->end())
      return;

    int32_t next_timeout = (*itr)->activity_time_next();

    if (next_timeout <= cachedTime.seconds())
      m_tracker_list->send_state_itr(itr, send_state);
    else
      update_timeout(next_timeout - cachedTime.seconds());
  }

  if (m_slot_timeout)
    m_slot_timeout();
}
497
// Invoked by the task_scrape timer: for each tracker group with no
// active request, scrape the first tracker in the group that is both
// scrapable and usable (at most one scrape per group).
void
TrackerController::do_scrape() {
  TrackerList::iterator itr = m_tracker_list->begin();

  while (itr != m_tracker_list->end()) {
    uint32_t group = (*itr)->group();

    // Leave busy groups alone.
    if (m_tracker_list->has_active_in_group(group)) {
      itr = m_tracker_list->end_group(group);
      continue;
    }

    TrackerList::iterator group_end = m_tracker_list->end_group((*itr)->group());

    while (itr != group_end) {
      if ((*itr)->can_scrape() && (*itr)->is_usable()) {
        m_tracker_list->send_scrape(*itr);
        break;
      }

      itr++;
    }

    itr = group_end;
  }
}
524
// Handle a successful announce from tracker 'tb'. Clears the pending
// event and promiscuous/failure state, reschedules the next announce,
// then forwards the received address list to the owner via
// m_slot_success and returns its result.
uint32_t
TrackerController::receive_success(Tracker* tb, TrackerController::address_list* l) {
  if (!(m_flags & flag_active))
    return m_slot_success(l);

  // if (<check if we have multiple trackers to send this event to, before we declare success>) {
  m_flags &= ~(mask_send | flag_promiscuous_mode | flag_failure_mode);
  // }

  // If we still have active trackers, skip the timeout.

  // Calculate the next timeout according to a list of in-use
  // trackers, with the first timeout as the interval.

  if ((m_flags & flag_requesting))
    update_timeout(30);
  else if (!m_tracker_list->has_active())
    // TODO: Instead find the lowest timeout, correct timeout?
    update_timeout(tb->normal_interval());

  return m_slot_success(l);
}
547
// Handle a failed request. While inactive, or when the failure is
// not tied to a specific tracker (tb == NULL), only the owner's
// failure slot is notified. Otherwise reschedule via do_timeout()
// before reporting.
void
TrackerController::receive_failure(Tracker* tb, const std::string& msg) {
  if (!(m_flags & flag_active)) {
    m_slot_failure(msg);
    return;
  }

  if (tb == NULL) {
    LT_LOG_TRACKER(INFO, "Received failure msg:'%s'.", msg.c_str());
    m_slot_failure(msg);
    return;
  }

  // First failure right after at least one success: flag the
  // controller as being in failure mode.
  if (tb->failed_counter() == 1 && tb->success_counter() > 0)
    m_flags |= flag_failure_mode;

  do_timeout();
  m_slot_failure(msg);
}
567
// Handle a completed scrape. Currently a placeholder: nothing is
// done with the result even while active, and 'tb' is unused.
void
TrackerController::receive_scrape(Tracker* tb) {
  if (!(m_flags & flag_active)) {
    return;
  }
}
574
// A tracker was (re)enabled. If the controller is active and idle
// (no timeout queued, no request running), reschedule immediately so
// the new tracker gets considered. Notifies m_slot_tracker_enabled.
void
TrackerController::receive_tracker_enabled(Tracker* tb) {
  // TODO: This won't be needed if we rely only on Tracker::m_enable,
  // rather than a virtual function.
  if (!m_tracker_list->has_usable())
    return;

  if ((m_flags & flag_active)) {
    if (!m_private->task_timeout.is_queued() && !m_tracker_list->has_active()) {
      // TODO: Figure out the proper timeout to use here based on when the
      // tracker last connected, etc.
      update_timeout(0);
    }
  }

  if (m_slot_tracker_enabled)
    m_slot_tracker_enabled(tb);
}
593
594 void
receive_tracker_disabled(Tracker * tb)595 TrackerController::receive_tracker_disabled(Tracker* tb) {
596 if ((m_flags & flag_active) && !m_private->task_timeout.is_queued())
597 update_timeout(0);
598
599 if (m_slot_tracker_disabled)
600 m_slot_tracker_disabled(tb);
601 }
602
603 }
604