/* Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "core/or/or.h"

#include "app/config/config.h"
#include "core/mainloop/connection.h"
#include "feature/dircache/conscache.h"
#include "feature/dircache/consdiffmgr.h"
#include "feature/dircommon/directory.h"
#include "feature/dircache/dirserv.h"
#include "feature/nodelist/microdesc.h"
#include "feature/nodelist/routerlist.h"
#include "feature/relay/router.h"
#include "feature/relay/routermode.h"
#include "feature/stats/predict_ports.h"

#include "feature/dircache/cached_dir_st.h"
#include "feature/dircommon/dir_connection_st.h"
#include "feature/nodelist/extrainfo_st.h"
#include "feature/nodelist/microdesc_st.h"
#include "feature/nodelist/routerinfo_st.h"
#include "feature/nodelist/routerlist_st.h"

#include "lib/compress/compress.h"

/**
 * \file dirserv.c
 * \brief Directory server core implementation. Manages directory
 * contents and generates directory documents.
 *
 * This module implements most of the directory cache functionality, and some
 * of the directory authority functionality.  The directory.c module delegates
 * here in order to handle incoming requests from clients, via
 * connection_dirserv_flushed_some() and its kin.  In order to save RAM, this
 * module is responsible for spooling directory objects (in whole or in part)
 * onto buf_t instances, and then closing the dir_connection_t once the
 * objects are totally flushed.
 *
 * The directory.c module also delegates here for handling descriptor uploads
 * via dirserv_add_multiple_descriptors().
 *
 * Additionally, this module handles some aspects of voting, including:
 * deciding how to vote on individual flags (based on decisions reached in
 * rephist.c), formatting routerstatus lines, and deciding what relays to
 * include in an authority's vote.  (TODO: Those functions could profitably be
 * split off.  They only live in this file because historically they were
 * shared among the v1, v2, and v3 directory code.)
 */
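
/* Editor's sketch (illustrative comment only, not compiled): a rough picture
 * of how a spooled response moves through this module, assuming the request
 * has already been parsed by the directory.c/dircache code.  The local
 * variable names are hypothetical; the functions named are the ones defined
 * or declared in this file.
 *
 *   dir_connection_t *conn = ...;   // request arrived over DirPort/begindir
 *   conn->spool = smartlist_new();
 *   const char *msg = NULL;
 *   if (dirserv_get_routerdesc_spool(conn->spool, "all",
 *                                    DIR_SPOOL_SERVER_BY_FP,
 *                                    connection_dir_is_encrypted(conn),
 *                                    &msg) < 0) {
 *     // send an error response using msg
 *   }
 *   dirserv_spool_sort(conn);
 *   // ... write the HTTP header, then let the mainloop repeatedly call
 *   // connection_dirserv_flushed_some(conn) until conn->spool is drained.
 */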

static void clear_cached_dir(cached_dir_t *d);
static const signed_descriptor_t *get_signed_descriptor_by_fp(
                                                        const uint8_t *fp,
                                                        int extrainfo);

static int spooled_resource_lookup_body(const spooled_resource_t *spooled,
                                        int conn_is_encrypted,
                                        const uint8_t **body_out,
                                        size_t *size_out,
                                        time_t *published_out);
static cached_dir_t *spooled_resource_lookup_cached_dir(
                                   const spooled_resource_t *spooled,
                                   time_t *published_out);
static cached_dir_t *lookup_cached_dir_by_fp(const uint8_t *fp);

/********************************************************************/

/* A set of functions to answer questions about how we'd like to behave
 * as a directory mirror */

/** Return true iff we want to serve certificates for authorities
 * that we don't acknowledge as authorities ourselves.
 * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch
 * and keep these certificates.
 */
int
directory_caches_unknown_auth_certs(const or_options_t *options)
{
  return dir_server_mode(options) || options->BridgeRelay;
}

/** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc.
 * Else return 0.
 * Check options->DirPort_set and directory_permits_begindir_requests()
 * to see if we are willing to serve these directory documents to others via
 * the DirPort and begindir-over-ORPort, respectively.
 *
 * To check if we should fetch documents, use we_want_to_fetch_flavor and
 * we_want_to_fetch_unknown_auth_certs instead of this function.
 */
int
directory_caches_dir_info(const or_options_t *options)
{
  if (options->BridgeRelay || dir_server_mode(options))
    return 1;
  if (!server_mode(options) || !advertised_server_mode())
    return 0;
  /* We need an up-to-date view of network info if we're going to try to
   * block exit attempts from unknown relays. */
  return ! router_my_exit_policy_is_reject_star() &&
    should_refuse_unknown_exits(options);
}

/** Return 1 if we want to allow remote clients to ask us directory
 * requests via the "begin_dir" interface, which doesn't require
 * having any separate port open. */
int
directory_permits_begindir_requests(const or_options_t *options)
{
  return options->BridgeRelay != 0 || dir_server_mode(options);
}
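
/* Example (illustrative, hypothetical torrc fragment): with the configuration
 * below, directory_permits_begindir_requests() and directory_caches_dir_info()
 * both return true even though no DirPort is set, because BridgeRelay is
 * enabled; clients can then fetch directory documents from this relay via
 * begindir, tunneled over the ORPort.
 *
 *   BridgeRelay 1
 *   ORPort 9001
 */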

/********************************************************************/

/** Map from flavor name to the cached_dir_t for the v3 consensuses that we're
 * currently serving. */
static strmap_t *cached_consensuses = NULL;

/** Decrement the reference count on <b>d</b>, and free it if it no longer has
 * any references. */
void
cached_dir_decref(cached_dir_t *d)
{
  if (!d || --d->refcnt > 0)
    return;
  clear_cached_dir(d);
  tor_free(d);
}

/** Allocate and return a new cached_dir_t containing the string <b>s</b>,
 * published at <b>published</b>. */
cached_dir_t *
new_cached_dir(char *s, time_t published)
{
  cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t));
  d->refcnt = 1;
  d->dir = s;
  d->dir_len = strlen(s);
  d->published = published;
  if (tor_compress(&(d->dir_compressed), &(d->dir_compressed_len),
                   d->dir, d->dir_len, ZLIB_METHOD)) {
    log_warn(LD_BUG, "Error compressing directory");
  }
  return d;
}
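
/* Usage sketch (illustrative comment only): new_cached_dir() takes ownership
 * of the string passed in (it is stored as d->dir and later freed by
 * clear_cached_dir()), and the object starts with a reference count of 1.
 * The variable names below are hypothetical.
 *
 *   char *body = tor_strdup("network-status-version 3\n...");
 *   cached_dir_t *d = new_cached_dir(body, time(NULL));
 *   // 'body' now belongs to 'd'; do not free it separately.
 *   cached_dir_decref(d);   // drops the last reference and frees everything
 */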

/** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */
static void
clear_cached_dir(cached_dir_t *d)
{
  tor_free(d->dir);
  tor_free(d->dir_compressed);
  memset(d, 0, sizeof(cached_dir_t));
}

/** Free all storage held by the cached_dir_t in <b>d</b>. */
static void
free_cached_dir_(void *_d)
{
  cached_dir_t *d;
  if (!_d)
    return;

  d = (cached_dir_t *)_d;
  cached_dir_decref(d);
}

/** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that
 * we're serving with <b>networkstatus</b>, published at <b>published</b>.  No
 * validation is performed. */
void
dirserv_set_cached_consensus_networkstatus(const char *networkstatus,
                                           size_t networkstatus_len,
                                           const char *flavor_name,
                                           const common_digests_t *digests,
                                           const uint8_t *sha3_as_signed,
                                           time_t published)
{
  cached_dir_t *new_networkstatus;
  cached_dir_t *old_networkstatus;
  if (!cached_consensuses)
    cached_consensuses = strmap_new();

  new_networkstatus =
    new_cached_dir(tor_memdup_nulterm(networkstatus, networkstatus_len),
                   published);
  memcpy(&new_networkstatus->digests, digests, sizeof(common_digests_t));
  memcpy(&new_networkstatus->digest_sha3_as_signed, sha3_as_signed,
         DIGEST256_LEN);
  old_networkstatus = strmap_set(cached_consensuses, flavor_name,
                                 new_networkstatus);
  if (old_networkstatus)
    cached_dir_decref(old_networkstatus);
}

/** Return the latest downloaded consensus networkstatus in encoded, signed,
 * optionally compressed format, suitable for sending to clients. */
MOCK_IMPL(cached_dir_t *,
dirserv_get_consensus,(const char *flavor_name))
{
  if (!cached_consensuses)
    return NULL;
  return strmap_get(cached_consensuses, flavor_name);
}
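
/* Caching sketch (illustrative comment only): when a new consensus of a given
 * flavor arrives, the networkstatus code hands it to
 * dirserv_set_cached_consensus_networkstatus(), and later lookups by flavor
 * name go through dirserv_get_consensus().  The variables below are
 * hypothetical placeholders for values computed elsewhere.
 *
 *   dirserv_set_cached_consensus_networkstatus(consensus_body, consensus_len,
 *                                              "ns", &digests,
 *                                              sha3_as_signed, published);
 *   cached_dir_t *d = dirserv_get_consensus("ns");
 *   if (d)
 *     log_info(LD_DIR, "Serving a consensus published at %ld",
 *              (long)d->published);
 */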

/** As dir_split_resource_into_fingerprints, but instead fills
 * <b>spool_out</b> with a list of spooled_resource_t for the resource
 * identified through <b>source</b>. */
int
dir_split_resource_into_spoolable(const char *resource,
                                  dir_spool_source_t source,
                                  smartlist_t *spool_out,
                                  int *compressed_out,
                                  int flags)
{
  smartlist_t *fingerprints = smartlist_new();

  tor_assert(flags & (DSR_HEX|DSR_BASE64));
  const size_t digest_len =
    (flags & DSR_DIGEST256) ? DIGEST256_LEN : DIGEST_LEN;

  int r = dir_split_resource_into_fingerprints(resource, fingerprints,
                                               compressed_out, flags);
  /* This is not a very efficient implementation XXXX */
  SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) {
    spooled_resource_t *spooled =
      spooled_resource_new(source, digest, digest_len);
    if (spooled)
      smartlist_add(spool_out, spooled);
    tor_free(digest);
  } SMARTLIST_FOREACH_END(digest);

  smartlist_free(fingerprints);
  return r;
}
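
/* Resource-string sketch (illustrative comment only): callers in this file
 * pass the part of the URL after the "d/" or "fp/" prefix, which
 * dir_split_resource_into_fingerprints() is expected to split on '+' into
 * individual hex digests.  The digests below are hypothetical.
 *
 *   smartlist_t *spool = smartlist_new();
 *   dir_split_resource_into_spoolable(
 *       "0123456789ABCDEF0123456789ABCDEF01234567"
 *       "+89ABCDEF0123456789ABCDEF0123456789ABCDEF",
 *       DIR_SPOOL_SERVER_BY_FP, spool, NULL,
 *       DSR_HEX|DSR_SORT_UNIQ);
 *   // 'spool' now holds one spooled_resource_t per well-formed digest.
 */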

/** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t
 * pointers, adds spooled_resource_t entries for the corresponding digests to
 * <b>spool_out</b>, and doesn't use the /tor/server/ prefix.  For a /d/
 * request, adds descriptor digests; for other requests, adds identity
 * digests.
 */
int
dirserv_get_routerdesc_spool(smartlist_t *spool_out,
                             const char *key,
                             dir_spool_source_t source,
                             int conn_is_encrypted,
                             const char **msg_out)
{
  *msg_out = NULL;

  if (!strcmp(key, "all")) {
    const routerlist_t *rl = router_get_routerlist();
    SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) {
      spooled_resource_t *spooled;
      spooled = spooled_resource_new(source,
                              (const uint8_t *)r->cache_info.identity_digest,
                              DIGEST_LEN);
      /* Treat "all" requests as if they were unencrypted */
      conn_is_encrypted = 0;
      smartlist_add(spool_out, spooled);
    } SMARTLIST_FOREACH_END(r);
  } else if (!strcmp(key, "authority")) {
    const routerinfo_t *ri = router_get_my_routerinfo();
    if (ri)
      smartlist_add(spool_out,
                    spooled_resource_new(source,
                             (const uint8_t *)ri->cache_info.identity_digest,
                             DIGEST_LEN));
  } else if (!strcmpstart(key, "d/")) {
    key += strlen("d/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                  DSR_HEX|DSR_SORT_UNIQ);
  } else if (!strcmpstart(key, "fp/")) {
    key += strlen("fp/");
    dir_split_resource_into_spoolable(key, source, spool_out, NULL,
                                  DSR_HEX|DSR_SORT_UNIQ);
  } else {
    *msg_out = "Not found";
    return -1;
  }

  if (! conn_is_encrypted) {
    /* Remove anything that insists it not be sent unencrypted. */
    SMARTLIST_FOREACH_BEGIN(spool_out, spooled_resource_t *, spooled) {
      const uint8_t *body = NULL;
      size_t bodylen = 0;
      int r = spooled_resource_lookup_body(spooled, conn_is_encrypted,
                                           &body, &bodylen, NULL);
      if (r < 0 || body == NULL || bodylen == 0) {
        SMARTLIST_DEL_CURRENT(spool_out, spooled);
        spooled_resource_free(spooled);
      }
    } SMARTLIST_FOREACH_END(spooled);
  }

  if (!smartlist_len(spool_out)) {
    *msg_out = "Servers unavailable";
    return -1;
  }
  return 0;
}
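
/* Request-key sketch (illustrative comment only): the <b>key</b> argument is
 * the tail of the requested URL after the caller has stripped the directory
 * prefix, so the recognized forms are exactly the ones tested above.
 * "spool" and "msg" are hypothetical locals; <hex-id> and <hex-digest> stand
 * for 40-character hex digests.
 *
 *   dirserv_get_routerdesc_spool(spool, "all",
 *                                DIR_SPOOL_SERVER_BY_FP, 0, &msg);
 *   dirserv_get_routerdesc_spool(spool, "authority",
 *                                DIR_SPOOL_SERVER_BY_FP, 0, &msg);
 *   dirserv_get_routerdesc_spool(spool, "fp/<hex-id>+<hex-id>",
 *                                DIR_SPOOL_SERVER_BY_FP, 1, &msg);
 *   dirserv_get_routerdesc_spool(spool, "d/<hex-digest>",
 *                                DIR_SPOOL_SERVER_BY_DIGEST, 1, &msg);
 */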

/* ==========
 * Spooling code.
 * ========== */

spooled_resource_t *
spooled_resource_new(dir_spool_source_t source,
                     const uint8_t *digest, size_t digestlen)
{
  spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
  spooled->spool_source = source;
  switch (source) {
    case DIR_SPOOL_NETWORKSTATUS:
      spooled->spool_eagerly = 0;
      break;
    case DIR_SPOOL_SERVER_BY_DIGEST:
    case DIR_SPOOL_SERVER_BY_FP:
    case DIR_SPOOL_EXTRA_BY_DIGEST:
    case DIR_SPOOL_EXTRA_BY_FP:
    case DIR_SPOOL_MICRODESC:
    default:
      spooled->spool_eagerly = 1;
      break;
    case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
      tor_assert_unreached();
      break;
  }
  tor_assert(digestlen <= sizeof(spooled->digest));
  if (digest)
    memcpy(spooled->digest, digest, digestlen);
  return spooled;
}
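
/* Digest-length sketch (illustrative comment only): the digest length depends
 * on the spool source.  Router and extra-info descriptors are identified by
 * DIGEST_LEN (SHA-1) digests, while microdescriptors are looked up by
 * DIGEST256_LEN digests (see the DIR_SPOOL_MICRODESC case in
 * spooled_resource_lookup_body() below).  The digest buffers here are
 * hypothetical.
 *
 *   spooled_resource_t *by_fp =
 *     spooled_resource_new(DIR_SPOOL_SERVER_BY_FP, id_digest, DIGEST_LEN);
 *   spooled_resource_t *md =
 *     spooled_resource_new(DIR_SPOOL_MICRODESC, md_digest256, DIGEST256_LEN);
 */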

/**
 * Create a new spooled_resource_t to spool the contents of <b>entry</b> to
 * the user.  Return the spooled object on success, or NULL on failure (which
 * is probably caused by a failure to map the body of the item from disk).
 *
 * Adds a reference to entry's reference counter.
 */
spooled_resource_t *
spooled_resource_new_from_cache_entry(consensus_cache_entry_t *entry)
{
  spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t));
  spooled->spool_source = DIR_SPOOL_CONSENSUS_CACHE_ENTRY;
  spooled->spool_eagerly = 0;
  consensus_cache_entry_incref(entry);
  spooled->consensus_cache_entry = entry;

  int r = consensus_cache_entry_get_body(entry,
                                         &spooled->cce_body,
                                         &spooled->cce_len);
  if (r == 0) {
    return spooled;
  } else {
    spooled_resource_free(spooled);
    return NULL;
  }
}

/** Release all storage held by <b>spooled</b>. */
void
spooled_resource_free_(spooled_resource_t *spooled)
{
  if (spooled == NULL)
    return;

  if (spooled->cached_dir_ref) {
    cached_dir_decref(spooled->cached_dir_ref);
  }

  if (spooled->consensus_cache_entry) {
    consensus_cache_entry_decref(spooled->consensus_cache_entry);
  }

  tor_free(spooled);
}

/** When spooling data from a cached_dir_t object, we add up to this many
 * bytes to the outbuf at a time. */
#define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192

/** Return a compression ratio for compressing objects from <b>source</b>.
 */
static double
estimate_compression_ratio(dir_spool_source_t source)
{
  /* We should put in better estimates here, depending on the number of
     objects and their type */
  (void) source;
  return 0.5;
}

/** Return an estimated number of bytes needed for transmitting the
 * resource in <b>spooled</b> on <b>conn</b>.
 *
 * As a convenient side-effect, set *<b>published_out</b> to the resource's
 * publication time.
 */
static size_t
spooled_resource_estimate_size(const spooled_resource_t *spooled,
                               dir_connection_t *conn,
                               int compressed,
                               time_t *published_out)
{
  if (spooled->spool_eagerly) {
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen,
                                         published_out);
    if (r == -1 || body == NULL || bodylen == 0)
      return 0;
    if (compressed) {
      double ratio = estimate_compression_ratio(spooled->spool_source);
      bodylen = (size_t)(bodylen * ratio);
    }
    return bodylen;
  } else {
    cached_dir_t *cached;
    if (spooled->consensus_cache_entry) {
      if (published_out) {
        consensus_cache_entry_get_valid_after(
            spooled->consensus_cache_entry, published_out);
      }

      return spooled->cce_len;
    }
    if (spooled->cached_dir_ref) {
      cached = spooled->cached_dir_ref;
    } else {
      cached = spooled_resource_lookup_cached_dir(spooled,
                                                  published_out);
    }
    if (cached == NULL) {
      return 0;
    }
    size_t result = compressed ? cached->dir_compressed_len : cached->dir_len;
    return result;
  }
}

/** Return code for spooled_resource_flush_some */
typedef enum {
  SRFS_ERR = -1,
  SRFS_MORE = 0,
  SRFS_DONE
} spooled_resource_flush_status_t;

/** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>.
 * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from
 * this spooled resource, or SRFS_DONE if we are done flushing this spooled
 * resource.
 */
static spooled_resource_flush_status_t
spooled_resource_flush_some(spooled_resource_t *spooled,
                            dir_connection_t *conn)
{
  if (spooled->spool_eagerly) {
    /* Spool_eagerly resources are sent all-at-once. */
    const uint8_t *body = NULL;
    size_t bodylen = 0;
    int r = spooled_resource_lookup_body(spooled,
                                         connection_dir_is_encrypted(conn),
                                         &body, &bodylen, NULL);
    if (r == -1 || body == NULL || bodylen == 0) {
      /* Absent objects count as "done". */
      return SRFS_DONE;
    }

    connection_dir_buf_add((const char*)body, bodylen, conn, 0);

    return SRFS_DONE;
  } else {
    cached_dir_t *cached = spooled->cached_dir_ref;
    consensus_cache_entry_t *cce = spooled->consensus_cache_entry;
    if (cached == NULL && cce == NULL) {
      /* The cached_dir_t hasn't been materialized yet. So let's look it up. */
      cached = spooled->cached_dir_ref =
        spooled_resource_lookup_cached_dir(spooled, NULL);
      if (!cached) {
        /* Absent objects count as done. */
        return SRFS_DONE;
      }
      ++cached->refcnt;
      tor_assert_nonfatal(spooled->cached_dir_offset == 0);
    }

    if (BUG(!cached && !cce))
      return SRFS_DONE;

    int64_t total_len;
    const char *ptr;
    if (cached) {
      total_len = cached->dir_compressed_len;
      ptr = cached->dir_compressed;
    } else {
      total_len = spooled->cce_len;
      ptr = (const char *)spooled->cce_body;
    }
    /* How many bytes left to flush? */
    int64_t remaining;
    remaining = total_len - spooled->cached_dir_offset;
    if (BUG(remaining < 0))
      return SRFS_ERR;
    ssize_t bytes = (ssize_t) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE, remaining);

    connection_dir_buf_add(ptr + spooled->cached_dir_offset,
                           bytes, conn, 0);

    spooled->cached_dir_offset += bytes;
    if (spooled->cached_dir_offset >= (off_t)total_len) {
      return SRFS_DONE;
    } else {
      return SRFS_MORE;
    }
  }
}
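
/* Worked example (illustrative comment only): for a lazily-spooled
 * cached_dir_t whose compressed body is 1,048,576 bytes, each call above adds
 * at most DIRSERV_CACHED_DIR_CHUNK_SIZE (8192) bytes, so draining it takes
 * 1048576 / 8192 = 128 calls; the first 127 return SRFS_MORE and the last
 * returns SRFS_DONE.  connection_dirserv_flushed_some() below returns as soon
 * as it sees SRFS_MORE, so those calls are spread across many mainloop-driven
 * invocations as the outbuf drains.
 */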

/** Helper: find the cached_dir_t for a spooled_resource_t, for
 * sending it to <b>conn</b>. Set *<b>published_out</b>, if provided,
 * to the published time of the cached_dir_t.
 *
 * DOES NOT increase the reference count on the result.  Callers must do that
 * themselves if they mean to hang on to it.
 */
static cached_dir_t *
spooled_resource_lookup_cached_dir(const spooled_resource_t *spooled,
                                   time_t *published_out)
{
  tor_assert(spooled->spool_eagerly == 0);
  cached_dir_t *d = lookup_cached_dir_by_fp(spooled->digest);
  if (d != NULL) {
    if (published_out)
      *published_out = d->published;
  }
  return d;
}

/** Helper: Look up the body for an eagerly-served spooled_resource.  If
 * <b>conn_is_encrypted</b> is false, don't look up any resource that
 * shouldn't be sent over an unencrypted connection.  On success, set
 * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer
 * to the resource's body, size, and publication date, and return 0.
 * On failure return -1. */
static int
spooled_resource_lookup_body(const spooled_resource_t *spooled,
                             int conn_is_encrypted,
                             const uint8_t **body_out,
                             size_t *size_out,
                             time_t *published_out)
{
  tor_assert(spooled->spool_eagerly == 1);

  const signed_descriptor_t *sd = NULL;

  switch (spooled->spool_source) {
    case DIR_SPOOL_EXTRA_BY_FP: {
      sd = get_signed_descriptor_by_fp(spooled->digest, 1);
      break;
    }
    case DIR_SPOOL_SERVER_BY_FP: {
      sd = get_signed_descriptor_by_fp(spooled->digest, 0);
      break;
    }
    case DIR_SPOOL_SERVER_BY_DIGEST: {
      sd = router_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_EXTRA_BY_DIGEST: {
      sd = extrainfo_get_by_descriptor_digest((const char *)spooled->digest);
      break;
    }
    case DIR_SPOOL_MICRODESC: {
      microdesc_t *md = microdesc_cache_lookup_by_digest256(
                                  get_microdesc_cache(),
                                  (const char *)spooled->digest);
      if (! md || ! md->body) {
        return -1;
      }
      *body_out = (const uint8_t *)md->body;
      *size_out = md->bodylen;
      if (published_out)
        *published_out = TIME_MAX;
      return 0;
    }
    case DIR_SPOOL_NETWORKSTATUS:
    case DIR_SPOOL_CONSENSUS_CACHE_ENTRY:
    default:
      /* LCOV_EXCL_START */
      tor_assert_nonfatal_unreached();
      return -1;
      /* LCOV_EXCL_STOP */
  }

  /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */

  if (sd == NULL) {
    return -1;
  }
  if (sd->send_unencrypted == 0 && ! conn_is_encrypted) {
    /* we did this check once before (so we could have an accurate size
     * estimate and maybe send a 404 if somebody asked for only bridges on
     * a connection), but we need to do it again in case a previously
     * unknown bridge descriptor has shown up between then and now. */
    return -1;
  }
  *body_out = (const uint8_t *) signed_descriptor_get_body(sd);
  *size_out = sd->signed_descriptor_len;
  if (published_out)
    *published_out = sd->published_on;
  return 0;
}

/** Given a fingerprint <b>fp</b> which is either set if we're looking for a
 * v2 status, or zeroes if we're looking for a v3 status, or a NUL-padded
 * flavor name if we want a flavored v3 status, return a pointer to the
 * appropriate cached dir object, or NULL if there isn't one available. */
static cached_dir_t *
lookup_cached_dir_by_fp(const uint8_t *fp)
{
  cached_dir_t *d = NULL;
  if (tor_digest_is_zero((const char *)fp) && cached_consensuses) {
    d = strmap_get(cached_consensuses, "ns");
  } else if (memchr(fp, '\0', DIGEST_LEN) && cached_consensuses) {
    /* this here interface is a nasty hack: we're shoving a flavor into
     * a digest field. */
    d = strmap_get(cached_consensuses, (const char *)fp);
  }
  return d;
}
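
/* Encoding sketch (illustrative comment only): the "flavor in a digest field"
 * hack mentioned above means a caller asking for a flavored consensus writes
 * the flavor name, NUL-padded, into the digest buffer.  The local variable
 * below is hypothetical.
 *
 *   uint8_t fp[DIGEST_LEN];
 *   memset(fp, 0, sizeof(fp));
 *   memcpy(fp, "microdesc", strlen("microdesc"));
 *   cached_dir_t *d = lookup_cached_dir_by_fp(fp);   // flavored v3 lookup
 *
 * An all-zero buffer instead selects the unflavored ("ns") consensus.
 */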

/** Try to guess the number of bytes that will be needed to send the
 * spooled objects for <b>conn</b>'s outgoing spool.  In the process,
 * remove every element of the spool that refers to an absent object, or
 * which was published earlier than <b>cutoff</b>.  Set *<b>size_out</b>
 * to the number of bytes, and *<b>n_expired_out</b> to the number of
 * objects removed for being too old. */
void
dirserv_spool_remove_missing_and_guess_size(dir_connection_t *conn,
                                            time_t cutoff,
                                            int compression,
                                            size_t *size_out,
                                            int *n_expired_out)
{
  if (BUG(!conn))
    return;

  smartlist_t *spool = conn->spool;
  if (!spool) {
    if (size_out)
      *size_out = 0;
    if (n_expired_out)
      *n_expired_out = 0;
    return;
  }
  int n_expired = 0;
  uint64_t total = 0;
  SMARTLIST_FOREACH_BEGIN(spool, spooled_resource_t *, spooled) {
    time_t published = TIME_MAX;
    size_t sz = spooled_resource_estimate_size(spooled, conn,
                                               compression, &published);
    if (published < cutoff) {
      ++n_expired;
      SMARTLIST_DEL_CURRENT(spool, spooled);
      spooled_resource_free(spooled);
    } else if (sz == 0) {
      SMARTLIST_DEL_CURRENT(spool, spooled);
      spooled_resource_free(spooled);
    } else {
      total += sz;
    }
  } SMARTLIST_FOREACH_END(spooled);

  if (size_out) {
    *size_out = (total > SIZE_MAX) ? SIZE_MAX : (size_t)total;
  }
  if (n_expired_out)
    *n_expired_out = n_expired;
}
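
/* Caller sketch (illustrative comment only, assuming a handler in the
 * dircache code): before answering, the request handler prunes the spool and
 * uses the size estimate to decide whether anything is left to serve.  The
 * cutoff value and locals are hypothetical.
 *
 *   size_t size_guess = 0;
 *   int n_expired = 0;
 *   time_t publish_cutoff = time(NULL) - (60*60*24);  // e.g. one day
 *   dirserv_spool_remove_missing_and_guess_size(conn, publish_cutoff,
 *                                               compressed, &size_guess,
 *                                               &n_expired);
 *   if (!conn->spool || !smartlist_len(conn->spool)) {
 *     // nothing usable remained: send a 404 and clean up with
 *     // dir_conn_clear_spool(conn)
 *   }
 */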

/** Helper: used to sort a connection's spool. */
static int
dirserv_spool_sort_comparison_(const void **a_, const void **b_)
{
  const spooled_resource_t *a = *a_;
  const spooled_resource_t *b = *b_;
  return fast_memcmp(a->digest, b->digest, sizeof(a->digest));
}

/** Sort all the entries in <b>conn</b>'s spool by digest. */
void
dirserv_spool_sort(dir_connection_t *conn)
{
  if (conn->spool == NULL)
    return;
  smartlist_sort(conn->spool, dirserv_spool_sort_comparison_);
}

/** Return the cache-info for identity fingerprint <b>fp</b>, or
 * its extra-info document if <b>extrainfo</b> is true. Return
 * NULL if not found. */
static const signed_descriptor_t *
get_signed_descriptor_by_fp(const uint8_t *fp, int extrainfo)
{
  if (router_digest_is_me((const char *)fp)) {
    if (extrainfo)
      return &(router_get_my_extrainfo()->cache_info);
    else
      return &(router_get_my_routerinfo()->cache_info);
  } else {
    const routerinfo_t *ri = router_get_by_id_digest((const char *)fp);
    if (ri) {
      if (extrainfo)
        return extrainfo_get_by_descriptor_digest(
                                     ri->cache_info.extra_info_digest);
      else
        return &ri->cache_info;
    }
  }
  return NULL;
}

/** When we're spooling data onto our outbuf, add more whenever we dip
 * below this threshold. */
#define DIRSERV_BUFFER_MIN 16384

/**
 * Called whenever we have flushed some directory data in state
 * SERVER_WRITING, or whenever we want to fill the buffer with initial
 * directory data (so that subsequent writes will occur, and trigger this
 * function again.)
 *
 * Return 0 on success, and -1 on failure.
 */
int
connection_dirserv_flushed_some(dir_connection_t *conn)
{
  tor_assert(conn->base_.state == DIR_CONN_STATE_SERVER_WRITING);
  if (conn->spool == NULL)
    return 0;

  while (connection_get_outbuf_len(TO_CONN(conn)) < DIRSERV_BUFFER_MIN &&
         smartlist_len(conn->spool)) {
    spooled_resource_t *spooled =
      smartlist_get(conn->spool, smartlist_len(conn->spool)-1);
    spooled_resource_flush_status_t status;
    status = spooled_resource_flush_some(spooled, conn);
    if (status == SRFS_ERR) {
      return -1;
    } else if (status == SRFS_MORE) {
      return 0;
    }
    tor_assert(status == SRFS_DONE);

    /* If we're here, we're done flushing this resource. */
    tor_assert(smartlist_pop_last(conn->spool) == spooled);
    spooled_resource_free(spooled);
  }

  if (smartlist_len(conn->spool) > 0) {
    /* We're still spooling something. */
    return 0;
  }

  /* If we get here, we're done. */
  smartlist_free(conn->spool);
  conn->spool = NULL;
  if (conn->compress_state) {
    /* Flush the compression state: there could be more bytes pending in there,
     * and we don't want to omit bytes. */
    connection_buf_add_compress("", 0, conn, 1);
    tor_compress_free(conn->compress_state);
    conn->compress_state = NULL;
  }
  return 0;
}

/** Remove every element from <b>conn</b>'s outgoing spool, and delete
 * the spool. */
void
dir_conn_clear_spool(dir_connection_t *conn)
{
  if (!conn || ! conn->spool)
    return;
  SMARTLIST_FOREACH(conn->spool, spooled_resource_t *, s,
                    spooled_resource_free(s));
  smartlist_free(conn->spool);
  conn->spool = NULL;
}

/** Release all storage used by the directory server. */
void
dirserv_free_all(void)
{
  strmap_free(cached_consensuses, free_cached_dir_);
  cached_consensuses = NULL;
}