1 //! Implementation for the primary directory state machine.
2 //!
3 //! There are three (active) states that a download can be in: looking
4 //! for a consensus ([`GetConsensusState`]), looking for certificates
5 //! to validate that consensus ([`GetCertsState`]), and looking for
6 //! microdescriptors ([`GetMicrodescsState`]).
7 //!
8 //! These states have no contact with the network, and are purely
9 //! reactive to other code that drives them. See the
10 //! [`bootstrap`](crate::bootstrap) module for functions that actually
11 //! load or download directory information.
12
13 use rand::Rng;
14 use std::collections::{HashMap, HashSet};
15 use std::fmt::Debug;
16 use std::sync::{Mutex, Weak};
17 use std::time::{Duration, SystemTime};
18 use time::OffsetDateTime;
19 use tor_netdir::{MdReceiver, NetDir, PartialNetDir};
20 use tor_netdoc::doc::netstatus::Lifetime;
21 use tracing::{info, warn};
22
23 use crate::{
24 docmeta::{AuthCertMeta, ConsensusMeta},
25 retry::DownloadSchedule,
26 shared_ref::SharedMutArc,
27 storage::sqlite::SqliteStore,
28 CacheUsage, ClientRequest, DirMgrConfig, DirState, DocId, DocumentText, Error, Readiness,
29 Result,
30 };
31 use tor_checkable::{ExternallySigned, SelfSigned, Timebound};
32 use tor_llcrypto::pk::rsa::RsaIdentity;
33 use tor_netdoc::doc::{
34 microdesc::{MdDigest, Microdesc},
35 netstatus::MdConsensus,
36 };
37 use tor_netdoc::{
38 doc::{
39 authcert::{AuthCert, AuthCertKeyIds},
40 microdesc::MicrodescReader,
41 netstatus::{ConsensusFlavor, UnvalidatedMdConsensus},
42 },
43 AllowAnnotations,
44 };
45 use tor_rtcompat::Runtime;
46
/// An object where we can put a usable netdir.
///
/// Note that there's only one implementation for this trait: DirMgr.
/// We make this a trait anyway to make sure that the different states
/// in this module can _only_ interact with the DirMgr through
/// modifying the NetDir and looking at the configuration.
pub(crate) trait WriteNetDir: 'static + Sync + Send {
    /// Return a DirMgrConfig to use when asked how to retry downloads,
    /// or when we need to find a list of descriptors.
    fn config(&self) -> &DirMgrConfig;

    /// Return a reference where we can write or modify a NetDir.
    fn netdir(&self) -> &SharedMutArc<NetDir>;

    /// Called to note that the consensus stored in [`Self::netdir()`] has been
    /// changed.
    fn netdir_consensus_changed(&self);

    /// Called to note that the descriptors stored in
    /// [`Self::netdir()`] have been changed.
    fn netdir_descriptors_changed(&self);

    /// Called to find the current time.
    ///
    /// This is just `SystemTime::now()` in production, but for
    /// testing it is helpful to be able to mock our current view
    /// of the time.
    fn now(&self) -> SystemTime;
}
76
impl<R: Runtime> WriteNetDir for crate::DirMgr<R> {
    fn config(&self) -> &DirMgrConfig {
        &self.config
    }
    fn netdir(&self) -> &SharedMutArc<NetDir> {
        &self.netdir
    }
    fn netdir_consensus_changed(&self) {
        use std::sync::atomic::Ordering;
        // Just raise a flag; presumably the DirMgr observes it
        // elsewhere (the reader is not visible in this file).
        self.netdir_consensus_changed.store(true, Ordering::SeqCst);
    }
    fn netdir_descriptors_changed(&self) {
        use std::sync::atomic::Ordering;
        // Same flag-raising pattern as netdir_consensus_changed().
        self.netdir_descriptors_changed
            .store(true, Ordering::SeqCst);
    }
    fn now(&self) -> SystemTime {
        // Production implementation: the real wall clock.
        SystemTime::now()
    }
}
97
/// Initial state: fetching or loading a consensus directory.
#[derive(Clone, Debug)]
pub(crate) struct GetConsensusState<DM: WriteNetDir> {
    /// How should we get the consensus from the cache, if at all?
    cache_usage: CacheUsage,

    /// If present, our next state.
    ///
    /// (This is present once we have a consensus.)
    next: Option<GetCertsState<DM>>,

    /// A list of RsaIdentity for the authorities that we believe in.
    ///
    /// No consensus can be valid unless it purports to be signed by
    /// more than half of these authorities.
    authority_ids: Vec<RsaIdentity>,

    /// A weak reference to the directory manager that wants us to
    /// fetch this information. When this reference goes away, we exit.
    writedir: Weak<DM>,
}
119
120 impl<DM: WriteNetDir> GetConsensusState<DM> {
121 /// Create a new GetConsensusState from a weak reference to a
122 /// directory manager and a `cache_usage` flag.
new(writedir: Weak<DM>, cache_usage: CacheUsage) -> Result<Self>123 pub(crate) fn new(writedir: Weak<DM>, cache_usage: CacheUsage) -> Result<Self> {
124 let authority_ids: Vec<_> = if let Some(writedir) = Weak::upgrade(&writedir) {
125 writedir
126 .config()
127 .authorities()
128 .iter()
129 .map(|auth| *auth.v3ident())
130 .collect()
131 } else {
132 return Err(Error::ManagerDropped);
133 };
134 Ok(GetConsensusState {
135 cache_usage,
136 next: None,
137 authority_ids,
138 writedir,
139 })
140 }
141 }
142
143 impl<DM: WriteNetDir> DirState for GetConsensusState<DM> {
describe(&self) -> String144 fn describe(&self) -> String {
145 if self.next.is_some() {
146 "About to fetch certificates."
147 } else {
148 match self.cache_usage {
149 CacheUsage::CacheOnly => "Looking for a cached consensus.",
150 CacheUsage::CacheOkay => "Looking for a consensus.",
151 CacheUsage::MustDownload => "Downloading a consensus.",
152 }
153 }
154 .to_string()
155 }
missing_docs(&self) -> Vec<DocId>156 fn missing_docs(&self) -> Vec<DocId> {
157 if self.can_advance() {
158 return Vec::new();
159 }
160 let flavor = ConsensusFlavor::Microdesc;
161 vec![DocId::LatestConsensus {
162 flavor,
163 cache_usage: self.cache_usage,
164 }]
165 }
is_ready(&self, _ready: Readiness) -> bool166 fn is_ready(&self, _ready: Readiness) -> bool {
167 false
168 }
can_advance(&self) -> bool169 fn can_advance(&self) -> bool {
170 self.next.is_some()
171 }
dl_config(&self) -> Result<DownloadSchedule>172 fn dl_config(&self) -> Result<DownloadSchedule> {
173 if let Some(wd) = Weak::upgrade(&self.writedir) {
174 Ok(*wd.config().schedule().retry_consensus())
175 } else {
176 Err(Error::ManagerDropped)
177 }
178 }
add_from_cache( &mut self, docs: HashMap<DocId, DocumentText>, _storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>179 fn add_from_cache(
180 &mut self,
181 docs: HashMap<DocId, DocumentText>,
182 _storage: Option<&Mutex<SqliteStore>>,
183 ) -> Result<bool> {
184 let text = match docs.into_iter().next() {
185 None => return Ok(false),
186 Some((
187 DocId::LatestConsensus {
188 flavor: ConsensusFlavor::Microdesc,
189 ..
190 },
191 text,
192 )) => text,
193 _ => return Err(Error::Unwanted("Not an md consensus")),
194 };
195
196 self.add_consensus_text(true, text.as_str()?)
197 .map(|meta| meta.is_some())
198 }
add_from_download( &mut self, text: &str, _request: &ClientRequest, storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>199 fn add_from_download(
200 &mut self,
201 text: &str,
202 _request: &ClientRequest,
203 storage: Option<&Mutex<SqliteStore>>,
204 ) -> Result<bool> {
205 if let Some(meta) = self.add_consensus_text(false, text)? {
206 if let Some(store) = storage {
207 let mut w = store.lock().expect("Directory storage lock poisoned");
208 w.store_consensus(meta, ConsensusFlavor::Microdesc, true, text)?;
209 }
210 Ok(true)
211 } else {
212 Ok(false)
213 }
214 }
advance(self: Box<Self>) -> Result<Box<dyn DirState>>215 fn advance(self: Box<Self>) -> Result<Box<dyn DirState>> {
216 Ok(match self.next {
217 Some(next) => Box::new(next),
218 None => self,
219 })
220 }
reset_time(&self) -> Option<SystemTime>221 fn reset_time(&self) -> Option<SystemTime> {
222 None
223 }
reset(self: Box<Self>) -> Result<Box<dyn DirState>>224 fn reset(self: Box<Self>) -> Result<Box<dyn DirState>> {
225 Ok(self)
226 }
227 }
228
impl<DM: WriteNetDir> GetConsensusState<DM> {
    /// Helper: try to set the current consensus text from an input
    /// string `text`. Refuse it if the authorities could never be
    /// correct, or if it is ill-formed.
    ///
    /// On success, builds the next state (`GetCertsState`) into
    /// `self.next` and returns a reference to the consensus metadata;
    /// returns `Ok(None)` if the consensus parsed but was not timely.
    fn add_consensus_text(
        &mut self,
        from_cache: bool,
        text: &str,
    ) -> Result<Option<&ConsensusMeta>> {
        // Try to parse it and get its metadata.
        let (consensus_meta, unvalidated) = {
            let (signedval, remainder, parsed) = MdConsensus::parse(text)?;
            let now = current_time(&self.writedir)?;
            if let Ok(timely) = parsed.check_valid_at(&now) {
                let meta = ConsensusMeta::from_unvalidated(signedval, remainder, &timely);
                (meta, timely)
            } else {
                // Expired or not-yet-valid: not an error, just unusable.
                return Ok(None);
            }
        };

        // Check out what authorities we believe in, and see if enough
        // of them are purported to have signed this consensus.
        let n_authorities = self.authority_ids.len() as u16;
        let unvalidated = unvalidated.set_n_authorities(n_authorities);

        let id_refs: Vec<_> = self.authority_ids.iter().collect();
        if !unvalidated.authorities_are_correct(&id_refs[..]) {
            return Err(Error::UnrecognizedAuthorities);
        }

        // Make a set of all the certificates we want -- the subset of
        // those listed on the consensus that we would indeed accept as
        // authoritative.
        let desired_certs = unvalidated
            .signing_cert_ids()
            .filter(|m| self.recognizes_authority(&m.id_fingerprint))
            .collect();

        self.next = Some(GetCertsState {
            cache_usage: self.cache_usage,
            from_cache,
            unvalidated,
            consensus_meta,
            missing_certs: desired_certs,
            certs: Vec::new(),
            writedir: Weak::clone(&self.writedir),
        });

        // Unwrap should be safe because `next` was just assigned
        #[allow(clippy::unwrap_used)]
        Ok(Some(&self.next.as_ref().unwrap().consensus_meta))
    }

    /// Return true if `id` is an authority identity we recognize
    fn recognizes_authority(&self, id: &RsaIdentity) -> bool {
        self.authority_ids.iter().any(|auth| auth == id)
    }
}
288
/// Second state: fetching or loading authority certificates.
///
/// TODO: we should probably do what C tor does, and try to use the
/// same directory that gave us the consensus.
///
/// TODO SECURITY: This needs better handling for the DOS attack where
/// we are given a bad consensus signed with fictional certificates
/// that we can never find.
#[derive(Clone, Debug)]
struct GetCertsState<DM: WriteNetDir> {
    /// The cache usage we had in mind when we began. Used to reset.
    cache_usage: CacheUsage,
    /// True iff we loaded the consensus from our cache.
    #[allow(dead_code)]
    from_cache: bool,
    /// The consensus that we are trying to validate.
    unvalidated: UnvalidatedMdConsensus,
    /// Metadata for the consensus.
    consensus_meta: ConsensusMeta,
    /// A set of the key identities (`AuthCertKeyIds`) for the
    /// certificates we don't have yet.
    missing_certs: HashSet<AuthCertKeyIds>,
    /// A list of the certificates we've been able to load or download.
    certs: Vec<AuthCert>,
    /// Reference to our directory manager.
    writedir: Weak<DM>,
}
316
317 impl<DM: WriteNetDir> DirState for GetCertsState<DM> {
describe(&self) -> String318 fn describe(&self) -> String {
319 let total = self.certs.len() + self.missing_certs.len();
320 format!(
321 "Downloading certificates for consensus (we are missing {}/{}).",
322 self.missing_certs.len(),
323 total
324 )
325 }
missing_docs(&self) -> Vec<DocId>326 fn missing_docs(&self) -> Vec<DocId> {
327 self.missing_certs
328 .iter()
329 .map(|id| DocId::AuthCert(*id))
330 .collect()
331 }
is_ready(&self, _ready: Readiness) -> bool332 fn is_ready(&self, _ready: Readiness) -> bool {
333 false
334 }
can_advance(&self) -> bool335 fn can_advance(&self) -> bool {
336 self.unvalidated.key_is_correct(&self.certs[..]).is_ok()
337 }
dl_config(&self) -> Result<DownloadSchedule>338 fn dl_config(&self) -> Result<DownloadSchedule> {
339 if let Some(wd) = Weak::upgrade(&self.writedir) {
340 Ok(*wd.config().schedule().retry_certs())
341 } else {
342 Err(Error::ManagerDropped)
343 }
344 }
add_from_cache( &mut self, docs: HashMap<DocId, DocumentText>, _storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>345 fn add_from_cache(
346 &mut self,
347 docs: HashMap<DocId, DocumentText>,
348 _storage: Option<&Mutex<SqliteStore>>,
349 ) -> Result<bool> {
350 let mut changed = false;
351 // Here we iterate over the documents we want, taking them from
352 // our input and remembering them.
353 for id in &self.missing_docs() {
354 if let Some(cert) = docs.get(id) {
355 let parsed = AuthCert::parse(cert.as_str()?)?.check_signature()?;
356 let now = current_time(&self.writedir)?;
357 if let Ok(cert) = parsed.check_valid_at(&now) {
358 self.missing_certs.remove(cert.key_ids());
359 self.certs.push(cert);
360 changed = true;
361 } else {
362 warn!("Got a cert from our cache that we couldn't parse");
363 }
364 }
365 }
366 Ok(changed)
367 }
add_from_download( &mut self, text: &str, request: &ClientRequest, storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>368 fn add_from_download(
369 &mut self,
370 text: &str,
371 request: &ClientRequest,
372 storage: Option<&Mutex<SqliteStore>>,
373 ) -> Result<bool> {
374 let asked_for: HashSet<_> = match request {
375 ClientRequest::AuthCert(a) => a.keys().collect(),
376 _ => return Err(Error::BadArgument("Mismatched request")),
377 };
378
379 let mut newcerts = Vec::new();
380 for cert in AuthCert::parse_multiple(text) {
381 if let Ok(parsed) = cert {
382 let s = parsed
383 .within(text)
384 .expect("Certificate was not in input as expected");
385 if let Ok(wellsigned) = parsed.check_signature() {
386 let now = current_time(&self.writedir)?;
387 if let Ok(timely) = wellsigned.check_valid_at(&now) {
388 newcerts.push((timely, s));
389 }
390 } else {
391 // TODO: note the source.
392 warn!("Badly signed certificate received and discarded.");
393 }
394 } else {
395 // TODO: note the source.
396 warn!("Unparsable certificate received and discarded.");
397 }
398 }
399
400 // Now discard any certs we didn't ask for.
401 let len_orig = newcerts.len();
402 newcerts.retain(|(cert, _)| asked_for.contains(cert.key_ids()));
403 if newcerts.len() != len_orig {
404 warn!("Discarding certificates that we didn't ask for.");
405 }
406
407 // We want to exit early if we aren't saving any certificates.
408 if newcerts.is_empty() {
409 return Ok(false);
410 }
411
412 if let Some(store) = storage {
413 // Write the certificates to the store.
414 let v: Vec<_> = newcerts[..]
415 .iter()
416 .map(|(cert, s)| (AuthCertMeta::from_authcert(cert), *s))
417 .collect();
418 let mut w = store.lock().expect("Directory storage lock poisoned");
419 w.store_authcerts(&v[..])?;
420 }
421
422 // Remember the certificates in this state, and remove them
423 // from our list of missing certs.
424 let mut changed = false;
425 for (cert, _) in newcerts {
426 let ids = cert.key_ids();
427 if self.missing_certs.contains(ids) {
428 self.missing_certs.remove(ids);
429 self.certs.push(cert);
430 changed = true;
431 }
432 }
433
434 Ok(changed)
435 }
advance(self: Box<Self>) -> Result<Box<dyn DirState>>436 fn advance(self: Box<Self>) -> Result<Box<dyn DirState>> {
437 if self.can_advance() {
438 let validated = self.unvalidated.check_signature(&self.certs[..])?;
439 Ok(Box::new(GetMicrodescsState::new(
440 validated,
441 self.consensus_meta,
442 self.writedir,
443 )?))
444 } else {
445 Ok(self)
446 }
447 }
reset_time(&self) -> Option<SystemTime>448 fn reset_time(&self) -> Option<SystemTime> {
449 Some(self.consensus_meta.lifetime().valid_until())
450 }
reset(self: Box<Self>) -> Result<Box<dyn DirState>>451 fn reset(self: Box<Self>) -> Result<Box<dyn DirState>> {
452 Ok(Box::new(GetConsensusState::new(
453 self.writedir,
454 self.cache_usage,
455 )?))
456 }
457 }
458
/// Final state: we're fetching or loading microdescriptors
#[derive(Debug, Clone)]
struct GetMicrodescsState<DM: WriteNetDir> {
    /// The digests of the microdescriptors we are missing.
    missing: HashSet<MdDigest>,
    /// The dirmgr to inform about a usable directory.
    writedir: Weak<DM>,
    /// A NetDir that we are currently building, but which doesn't
    /// have enough microdescs yet.
    ///
    /// Becomes `None` once the directory is usable (see
    /// `consider_upgrade`).
    partial: Option<PartialNetDir>,
    /// Metadata for the current consensus.
    meta: ConsensusMeta,
    /// A pending list of microdescriptor digests whose
    /// "last-listed-at" times we should update.
    newly_listed: Vec<MdDigest>,
    /// A time after which we should try to replace this directory and
    /// find a new one. Since this is randomized, we only compute it
    /// once.
    reset_time: SystemTime,
}
479
impl<DM: WriteNetDir> GetMicrodescsState<DM> {
    /// Create a new [`GetMicrodescsState`] from a provided
    /// microdescriptor consensus.
    ///
    /// Builds a `PartialNetDir`, pre-populates it from any previous
    /// NetDir the manager holds, and records which microdescriptors
    /// are still missing.
    fn new(consensus: MdConsensus, meta: ConsensusMeta, writedir: Weak<DM>) -> Result<Self> {
        // Until the directory becomes usable (see consider_upgrade),
        // plan to reset when the consensus stops being valid.
        let reset_time = consensus.lifetime().valid_until();

        let partial_dir = match Weak::upgrade(&writedir) {
            Some(wd) => {
                let params = wd.config().override_net_params();
                let mut dir = PartialNetDir::new(consensus, Some(params));
                // Reuse microdescriptors from the previous directory,
                // if we have one.
                if let Some(old_dir) = wd.netdir().get() {
                    dir.fill_from_previous_netdir(&old_dir);
                }
                dir
            }
            None => return Err(Error::ManagerDropped),
        };

        let missing = partial_dir.missing_microdescs().map(Clone::clone).collect();
        let mut result = GetMicrodescsState {
            missing,
            writedir,
            partial: Some(partial_dir),
            meta,
            newly_listed: Vec::new(),
            reset_time,
        };

        // The previous netdir may already have supplied enough
        // microdescriptors to make this one usable immediately.
        result.consider_upgrade();
        Ok(result)
    }

    /// Add a bunch of microdescriptors to the in-progress netdir.
    ///
    /// Return true if the netdir has just become usable.
    fn register_microdescs<I>(&mut self, mds: I) -> bool
    where
        I: IntoIterator<Item = Microdesc>,
    {
        if let Some(p) = &mut self.partial {
            // Still building: accumulate into the partial directory,
            // remembering each digest so its listed-time gets updated.
            for md in mds {
                self.newly_listed.push(*md.digest());
                p.add_microdesc(md);
            }
            return self.consider_upgrade();
        } else if let Some(wd) = Weak::upgrade(&self.writedir) {
            // Already usable: add the microdescs directly to the shared
            // netdir instead.
            let _ = wd.netdir().mutate(|netdir| {
                for md in mds {
                    netdir.add_microdesc(md);
                }
                wd.netdir_descriptors_changed();
                Ok(())
            });
        }
        false
    }

    /// Check whether this netdir we're building has _just_ become
    /// usable when it was not previously usable. If so, tell the
    /// dirmgr about it and return true; otherwise return false.
    fn consider_upgrade(&mut self) -> bool {
        // Take-and-put-back pattern: `partial` is moved out, and
        // restored only if the directory isn't sufficient yet.
        if let Some(p) = self.partial.take() {
            match p.unwrap_if_sufficient() {
                Ok(netdir) => {
                    // Usable! Pick the randomized replacement time and
                    // publish the new netdir to the manager.
                    self.reset_time = pick_download_time(netdir.lifetime());
                    if let Some(wd) = Weak::upgrade(&self.writedir) {
                        wd.netdir().replace(netdir);
                        wd.netdir_consensus_changed();
                        wd.netdir_descriptors_changed();
                        return true;
                    }
                    // Manager gone: fall through (partial stays None,
                    // but nothing can use us any more anyway).
                }
                Err(partial) => self.partial = Some(partial),
            }
        }
        false
    }

    /// Mark the consensus that we're getting MDs for as non-pending in the
    /// storage.
    ///
    /// Called when a consensus is no longer pending.
    fn mark_consensus_usable(&self, storage: Option<&Mutex<SqliteStore>>) -> Result<()> {
        if let Some(store) = storage {
            let mut store = store.lock().expect("Directory storage lock poisoned");
            info!("Marked consensus usable.");
            store.mark_consensus_usable(&self.meta)?;
            // Now that a consensus is usable, older consensuses may
            // need to expire.
            store.expire_all()?;
        }
        Ok(())
    }
}
574
575 impl<DM: WriteNetDir> DirState for GetMicrodescsState<DM> {
describe(&self) -> String576 fn describe(&self) -> String {
577 format!(
578 "Downloading microdescriptors (we are missing {}).",
579 self.missing.len()
580 )
581 }
missing_docs(&self) -> Vec<DocId>582 fn missing_docs(&self) -> Vec<DocId> {
583 self.missing.iter().map(|d| DocId::Microdesc(*d)).collect()
584 }
is_ready(&self, ready: Readiness) -> bool585 fn is_ready(&self, ready: Readiness) -> bool {
586 match ready {
587 Readiness::Complete => self.missing.is_empty(),
588 Readiness::Usable => self.partial.is_none(),
589 }
590 }
can_advance(&self) -> bool591 fn can_advance(&self) -> bool {
592 false
593 }
dl_config(&self) -> Result<DownloadSchedule>594 fn dl_config(&self) -> Result<DownloadSchedule> {
595 if let Some(wd) = Weak::upgrade(&self.writedir) {
596 Ok(*wd.config().schedule().retry_microdescs())
597 } else {
598 Err(Error::ManagerDropped)
599 }
600 }
add_from_cache( &mut self, docs: HashMap<DocId, DocumentText>, storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>601 fn add_from_cache(
602 &mut self,
603 docs: HashMap<DocId, DocumentText>,
604 storage: Option<&Mutex<SqliteStore>>,
605 ) -> Result<bool> {
606 let mut microdescs = Vec::new();
607 for (id, text) in docs {
608 if let DocId::Microdesc(digest) = id {
609 if !self.missing.remove(&digest) {
610 // XXXX BUG:
611 // we didn't want this.
612 continue;
613 }
614 if let Ok(md) = Microdesc::parse(text.as_str()?) {
615 if md.digest() == &digest {
616 microdescs.push(md);
617 continue;
618 }
619 }
620 warn!("Found a mismatched microdescriptor in cache; ignoring");
621 }
622 }
623
624 let changed = !microdescs.is_empty();
625 if self.register_microdescs(microdescs) {
626 // Just stopped being pending.
627 self.mark_consensus_usable(storage)?;
628 }
629
630 Ok(changed)
631 }
632
add_from_download( &mut self, text: &str, request: &ClientRequest, storage: Option<&Mutex<SqliteStore>>, ) -> Result<bool>633 fn add_from_download(
634 &mut self,
635 text: &str,
636 request: &ClientRequest,
637 storage: Option<&Mutex<SqliteStore>>,
638 ) -> Result<bool> {
639 let requested: HashSet<_> = if let ClientRequest::Microdescs(req) = request {
640 req.digests().collect()
641 } else {
642 return Err(Error::BadArgument("Mismatched request"));
643 };
644 let mut new_mds = Vec::new();
645 for anno in MicrodescReader::new(text, &AllowAnnotations::AnnotationsNotAllowed).flatten() {
646 let txt = anno
647 .within(text)
648 .expect("annotation not from within text as expected");
649 let md = anno.into_microdesc();
650 if !requested.contains(md.digest()) {
651 warn!(
652 "Received microdescriptor we did not ask for: {:?}",
653 md.digest()
654 );
655 continue;
656 }
657 self.missing.remove(md.digest());
658 new_mds.push((txt, md));
659 }
660
661 let mark_listed = self.meta.lifetime().valid_after();
662 if let Some(store) = storage {
663 let mut s = store.lock().expect("Directory storage lock poisoned");
664 if !self.newly_listed.is_empty() {
665 s.update_microdescs_listed(self.newly_listed.iter(), mark_listed)?;
666 self.newly_listed.clear();
667 }
668 if !new_mds.is_empty() {
669 s.store_microdescs(
670 new_mds.iter().map(|(txt, md)| (&txt[..], md.digest())),
671 mark_listed,
672 )?;
673 }
674 }
675 if self.register_microdescs(new_mds.into_iter().map(|(_, md)| md)) {
676 // Just stopped being pending.
677 self.mark_consensus_usable(storage)?;
678 }
679 Ok(true)
680 }
advance(self: Box<Self>) -> Result<Box<dyn DirState>>681 fn advance(self: Box<Self>) -> Result<Box<dyn DirState>> {
682 Ok(self)
683 }
reset_time(&self) -> Option<SystemTime>684 fn reset_time(&self) -> Option<SystemTime> {
685 Some(self.reset_time)
686 }
reset(self: Box<Self>) -> Result<Box<dyn DirState>>687 fn reset(self: Box<Self>) -> Result<Box<dyn DirState>> {
688 Ok(Box::new(GetConsensusState::new(
689 self.writedir,
690 CacheUsage::MustDownload, // XXXX I believe this is wrong?
691 )?))
692 }
693 }
694
695 /// Choose a random download time to replace a consensus whose lifetime
696 /// is `lifetime`.
pick_download_time(lifetime: &Lifetime) -> SystemTime697 fn pick_download_time(lifetime: &Lifetime) -> SystemTime {
698 let (lowbound, uncertainty) = client_download_range(lifetime);
699 let zero = Duration::new(0, 0);
700 let t = lowbound + rand::thread_rng().gen_range(zero..uncertainty);
701 info!("The current consensus is fresh until {}, and valid until {}. I've picked {} as the earliest time to replace it.",
702 OffsetDateTime::from(lifetime.fresh_until()),
703 OffsetDateTime::from(lifetime.valid_until()),
704 OffsetDateTime::from(t));
705 t
706 }
707
708 /// Based on the lifetime for a consensus, return the time range during which
709 /// clients should fetch the next one.
client_download_range(lt: &Lifetime) -> (SystemTime, Duration)710 fn client_download_range(lt: &Lifetime) -> (SystemTime, Duration) {
711 let valid_after = lt.valid_after();
712 let fresh_until = lt.fresh_until();
713 let valid_until = lt.valid_until();
714 let voting_interval = fresh_until
715 .duration_since(valid_after)
716 .expect("valid-after must precede fresh-until");
717 let whole_lifetime = valid_until
718 .duration_since(valid_after)
719 .expect("valid-after must precede valid-until");
720
721 // From dir-spec:
722 // "This time is chosen uniformly at random from the interval
723 // between the time 3/4 into the first interval after the
724 // consensus is no longer fresh, and 7/8 of the time remaining
725 // after that before the consensus is invalid."
726 let lowbound = voting_interval + (voting_interval * 3) / 4;
727 let remainder = whole_lifetime - lowbound;
728 let uncertainty = (remainder * 7) / 8;
729
730 (valid_after + lowbound, uncertainty)
731 }
732
733 /// Helper: call `now` on a Weak<WriteNetDir>.
current_time<DM: WriteNetDir>(writedir: &Weak<DM>) -> Result<SystemTime>734 fn current_time<DM: WriteNetDir>(writedir: &Weak<DM>) -> Result<SystemTime> {
735 if let Some(writedir) = Weak::upgrade(writedir) {
736 Ok(writedir.now())
737 } else {
738 Err(Error::ManagerDropped)
739 }
740 }
741
742 #[cfg(test)]
743 mod test {
744 #![allow(clippy::unwrap_used)]
745 #![allow(clippy::cognitive_complexity)]
746 use super::*;
747 use crate::{Authority, DownloadScheduleConfig};
748 use std::convert::TryInto;
749 use std::sync::{
750 atomic::{self, AtomicBool},
751 Arc,
752 };
753 use tempfile::TempDir;
754 use time::macros::datetime;
755 use tor_netdoc::doc::authcert::AuthCertKeyIds;
756
    #[test]
    fn download_schedule() {
        // A consensus lifetime: valid-after 20:00, fresh-until 21:00,
        // valid-until 23:00 (voting interval 1h, whole lifetime 3h).
        let va = datetime!(2008-08-02 20:00 UTC).into();
        let fu = datetime!(2008-08-02 21:00 UTC).into();
        let vu = datetime!(2008-08-02 23:00 UTC).into();
        let lifetime = Lifetime::new(va, fu, vu).unwrap();

        // Per dir-spec: the window starts 3/4 of an interval after
        // fresh-until (21:45) and spans 7/8 of the remaining lifetime.
        let expected_start: SystemTime = datetime!(2008-08-02 21:45 UTC).into();
        let expected_range = Duration::from_millis((75 * 60 * 1000) * 7 / 8);

        let (start, range) = client_download_range(&lifetime);
        assert_eq!(start, expected_start);
        assert_eq!(range, expected_range);

        // pick_download_time is randomized: check that it always lands
        // strictly within the allowed window.
        for _ in 0..100 {
            let when = pick_download_time(&lifetime);
            assert!(when > va);
            assert!(when >= expected_start);
            assert!(when < vu);
            assert!(when <= expected_start + range);
        }
    }
779
    /// Makes a memory-backed SqliteStore.
    ///
    /// The TempDir is returned so it outlives the store: the store was
    /// created with its path.
    fn temp_store() -> (TempDir, Mutex<SqliteStore>) {
        let tempdir = TempDir::new().unwrap();
        let conn = rusqlite::Connection::open_in_memory().unwrap();
        let store = SqliteStore::from_conn(conn, tempdir.path()).unwrap();
        (tempdir, Mutex::new(store))
    }
787
    /// A mock `WriteNetDir` for tests: records change notifications in
    /// atomic flags and reports a fixed, fake current time.
    struct DirRcv {
        // Configuration handed back by config().
        cfg: DirMgrConfig,
        // Shared netdir slot handed back by netdir().
        netdir: SharedMutArc<NetDir>,
        // Set when netdir_consensus_changed() is called.
        consensus_changed: AtomicBool,
        // Set when netdir_descriptors_changed() is called.
        descriptors_changed: AtomicBool,
        // The fixed time returned by now().
        now: SystemTime,
    }
795
    impl DirRcv {
        /// Build a DirRcv with a mocked "now", no fallback caches, and
        /// (optionally) a custom authority list.
        fn new(now: SystemTime, authorities: Option<Vec<Authority>>) -> Self {
            let mut netcfg = crate::NetworkConfig::builder();
            netcfg.fallback_caches(vec![]);
            if let Some(a) = authorities {
                netcfg.authorities(a);
            }
            // The cache path is deliberately bogus: these tests never
            // touch disk through the DirMgrConfig.
            let cfg = DirMgrConfig::builder()
                .cache_path("/we_will_never_use_this/")
                .network_config(netcfg.build().unwrap())
                .build()
                .unwrap();
            DirRcv {
                now,
                cfg,
                netdir: Default::default(),
                consensus_changed: false.into(),
                descriptors_changed: false.into(),
            }
        }
    }
817
    impl WriteNetDir for DirRcv {
        fn config(&self) -> &DirMgrConfig {
            &self.cfg
        }
        fn netdir(&self) -> &SharedMutArc<NetDir> {
            &self.netdir
        }
        fn netdir_consensus_changed(&self) {
            // Record the notification so tests can assert on it.
            self.consensus_changed.store(true, atomic::Ordering::SeqCst);
        }
        fn netdir_descriptors_changed(&self) {
            self.descriptors_changed
                .store(true, atomic::Ordering::SeqCst);
        }
        fn now(&self) -> SystemTime {
            // Return the fixed mock time instead of the real clock.
            self.now
        }
    }
836
    // Test data: sample consensus documents and authority certificates
    // bundled under ../testdata.
    const CONSENSUS: &str = include_str!("../testdata/mdconsensus1.txt");
    const CONSENSUS2: &str = include_str!("../testdata/mdconsensus2.txt");
    const AUTHCERT_5696: &str = include_str!("../testdata/cert-5696.txt");
    const AUTHCERT_5A23: &str = include_str!("../testdata/cert-5A23.txt");
    #[allow(unused)]
    const AUTHCERT_7C47: &str = include_str!("../testdata/cert-7C47.txt");
    /// The moment these tests pretend "now" is.
    /// (Presumably within the test consensus's validity window — TODO confirm.)
    fn test_time() -> SystemTime {
        datetime!(2020-08-07 12:42:45 UTC).into()
    }
    /// Build an RsaIdentity from a hex fingerprint string.
    ///
    /// Panics on invalid hex or wrong length (fine for test code).
    fn rsa(s: &str) -> RsaIdentity {
        let k = hex::decode(s).unwrap();
        RsaIdentity::from_bytes(&k[..]).unwrap()
    }
    /// The authorities that the test configuration trusts.
    fn test_authorities() -> Vec<Authority> {
        // Helper: build an Authority with a throwaway name from a hex
        // v3 identity fingerprint.
        fn a(s: &str) -> Authority {
            Authority::builder()
                .name("ignore")
                .v3ident(rsa(s))
                .build()
                .unwrap()
        }
        vec![
            a("5696AB38CB3852AFA476A5C07B2D4788963D5567"),
            a("5A23BA701776C9C1AB1C06E734E92AB3D5350D64"),
            // This is an authority according to the consensus, but we'll
            // pretend we don't recognize it, to make sure that we
            // don't fetch or accept it.
            // a("7C47DCB4A90E2C2B7C7AD27BD641D038CF5D7EBE"),
        ]
    }
    /// Key IDs for the "5696…" authority certificate in the test data.
    fn authcert_id_5696() -> AuthCertKeyIds {
        AuthCertKeyIds {
            id_fingerprint: rsa("5696ab38cb3852afa476a5c07b2d4788963d5567"),
            sk_fingerprint: rsa("f6ed4aa64d83caede34e19693a7fcf331aae8a6a"),
        }
    }
    /// Key IDs for the "5A23…" authority certificate in the test data.
    fn authcert_id_5a23() -> AuthCertKeyIds {
        AuthCertKeyIds {
            id_fingerprint: rsa("5a23ba701776c9c1ab1c06e734e92ab3d5350d64"),
            sk_fingerprint: rsa("d08e965cc6dcb6cb6ed776db43e616e93af61177"),
        }
    }
    /// Key IDs for the "7C47…" certificate.
    // remember, we're saying that we don't recognize this one as an authority.
    fn authcert_id_7c47() -> AuthCertKeyIds {
        AuthCertKeyIds {
            id_fingerprint: rsa("7C47DCB4A90E2C2B7C7AD27BD641D038CF5D7EBE"),
            sk_fingerprint: rsa("D3C013E0E6C82E246090D1C0798B75FCB7ACF120"),
        }
    }
    /// Parse the bundled microdescriptor file into a digest → text map.
    fn microdescs() -> HashMap<MdDigest, String> {
        const MICRODESCS: &str = include_str!("../testdata/microdescs.txt");
        let text = MICRODESCS;
        MicrodescReader::new(text, &AllowAnnotations::AnnotationsNotAllowed)
            .map(|res| {
                let anno = res.unwrap();
                // Keep the exact source text alongside the parsed digest.
                let text = anno.within(text).unwrap();
                let md = anno.into_microdesc();
                (*md.digest(), text.to_owned())
            })
            .collect()
    }
899
    #[test]
    fn get_consensus_state() {
        // Walk a fresh GetConsensusState through its life cycle: check its
        // initial properties, feed it junk and a consensus from unrecognized
        // authorities (both must be rejected and left out of the store), then
        // accept a good consensus from a download and again from the cache.
        let rcv = Arc::new(DirRcv::new(test_time(), None));

        let (_tempdir, store) = temp_store();

        let mut state =
            GetConsensusState::new(Arc::downgrade(&rcv), CacheUsage::CacheOkay).unwrap();

        // Is description okay?
        assert_eq!(&state.describe(), "Looking for a consensus.");

        // Basic properties: without a consensus it is not ready to advance.
        assert!(!state.can_advance());
        assert!(!state.is_ready(Readiness::Complete));
        assert!(!state.is_ready(Readiness::Usable));

        // Basic properties: it doesn't want to reset.
        assert!(state.reset_time().is_none());

        // Download configuration is simple: only 1 request can be done in
        // parallel. It uses a consensus retry schedule.
        let retry = state.dl_config().unwrap();
        assert_eq!(&retry, DownloadScheduleConfig::default().retry_consensus());

        // Do we know what we want? (Exactly one document: the latest
        // microdesc-flavored consensus, cache permitted.)
        let docs = state.missing_docs();
        assert_eq!(docs.len(), 1);
        let docid = docs[0];

        assert!(matches!(
            docid,
            DocId::LatestConsensus {
                flavor: ConsensusFlavor::Microdesc,
                cache_usage: CacheUsage::CacheOkay,
            }
        ));

        // Now suppose that we get some complete junk from a download.
        let req = tor_dirclient::request::ConsensusRequest::new(ConsensusFlavor::Microdesc);
        let req = crate::docid::ClientRequest::Consensus(req);
        let outcome = state.add_from_download("this isn't a consensus", &req, Some(&store));
        assert!(matches!(outcome, Err(Error::NetDocError(_))));
        // make sure it wasn't stored...
        assert!(store
            .lock()
            .unwrap()
            .latest_consensus(ConsensusFlavor::Microdesc, None)
            .unwrap()
            .is_none());

        // Now try again, with a real consensus... but the wrong authorities.
        let outcome = state.add_from_download(CONSENSUS, &req, Some(&store));
        assert!(matches!(outcome, Err(Error::UnrecognizedAuthorities)));
        // ... and make sure the rejected consensus wasn't stored either.
        assert!(store
            .lock()
            .unwrap()
            .latest_consensus(ConsensusFlavor::Microdesc, None)
            .unwrap()
            .is_none());

        // Great. Change the receiver to use a configuration where these test
        // authorities are recognized.
        let rcv = Arc::new(DirRcv::new(test_time(), Some(test_authorities())));

        let mut state =
            GetConsensusState::new(Arc::downgrade(&rcv), CacheUsage::CacheOkay).unwrap();
        let outcome = state.add_from_download(CONSENSUS, &req, Some(&store));
        assert!(outcome.unwrap());
        // This time the consensus should have made it into the store.
        assert!(store
            .lock()
            .unwrap()
            .latest_consensus(ConsensusFlavor::Microdesc, None)
            .unwrap()
            .is_some());

        // And with that, we should be asking for certificates
        assert!(state.can_advance());
        assert_eq!(&state.describe(), "About to fetch certificates.");
        assert_eq!(state.missing_docs(), Vec::new());
        let next = Box::new(state).advance().unwrap();
        assert_eq!(
            &next.describe(),
            "Downloading certificates for consensus (we are missing 2/2)."
        );

        // Try again, but this time get the state from the cache.
        let rcv = Arc::new(DirRcv::new(test_time(), Some(test_authorities())));
        let mut state =
            GetConsensusState::new(Arc::downgrade(&rcv), CacheUsage::CacheOkay).unwrap();
        let text: crate::storage::InputString = CONSENSUS.to_owned().into();
        let map = vec![(docid, text.into())].into_iter().collect();
        let outcome = state.add_from_cache(map, None);
        assert!(outcome.unwrap());
        assert!(state.can_advance());
    }
996
    #[test]
    fn get_certs_state() {
        // Exercise GetCertsState: initial properties, the missing-certificate
        // list (which must exclude the unrecognized authority), loading one
        // cert from the cache, rejecting a downloaded cert we didn't ask for,
        // accepting the one we did ask for, advancing, and resetting.

        /// Construct a GetCertsState with our test data
        fn new_getcerts_state() -> (Arc<DirRcv>, Box<dyn DirState>) {
            let rcv = Arc::new(DirRcv::new(test_time(), Some(test_authorities())));
            let mut state =
                GetConsensusState::new(Arc::downgrade(&rcv), CacheUsage::CacheOkay).unwrap();
            let req = tor_dirclient::request::ConsensusRequest::new(ConsensusFlavor::Microdesc);
            let req = crate::docid::ClientRequest::Consensus(req);
            let outcome = state.add_from_download(CONSENSUS, &req, None);
            assert!(outcome.unwrap());
            (rcv, Box::new(state).advance().unwrap())
        }

        let (_tempdir, store) = temp_store();
        let (_rcv, mut state) = new_getcerts_state();
        // Basic properties: description, status, reset time.
        assert_eq!(
            &state.describe(),
            "Downloading certificates for consensus (we are missing 2/2)."
        );
        assert!(!state.can_advance());
        assert!(!state.is_ready(Readiness::Complete));
        assert!(!state.is_ready(Readiness::Usable));
        // The reset time is when the consensus being certified expires.
        let consensus_expires = datetime!(2020-08-07 12:43:20 UTC).into();
        assert_eq!(state.reset_time(), Some(consensus_expires));
        let retry = state.dl_config().unwrap();
        assert_eq!(&retry, DownloadScheduleConfig::default().retry_certs());

        // Check that we get the right list of missing docs.
        let missing = state.missing_docs();
        assert_eq!(missing.len(), 2); // We are missing two certificates.
        assert!(missing.contains(&DocId::AuthCert(authcert_id_5696())));
        assert!(missing.contains(&DocId::AuthCert(authcert_id_5a23())));
        // we don't ask for this one because we don't recognize its authority
        assert!(!missing.contains(&DocId::AuthCert(authcert_id_7c47())));

        // Add one from the cache; make sure the list is still right
        let text1: crate::storage::InputString = AUTHCERT_5696.to_owned().into();
        // let text2: crate::storage::InputString = AUTHCERT_5A23.to_owned().into();
        let docs = vec![(DocId::AuthCert(authcert_id_5696()), text1.into())]
            .into_iter()
            .collect();
        let outcome = state.add_from_cache(docs, None);
        assert!(outcome.unwrap()); // no error, and something changed.
        assert!(!state.can_advance()); // But we aren't done yet.
        let missing = state.missing_docs();
        assert_eq!(missing.len(), 1); // Now we're only missing one!
        assert!(missing.contains(&DocId::AuthCert(authcert_id_5a23())));

        // Now try to add the other from a download ... but fail
        // because we didn't ask for it.
        let mut req = tor_dirclient::request::AuthCertRequest::new();
        req.push(authcert_id_5696()); // it's the wrong id.
        let req = ClientRequest::AuthCert(req);
        let outcome = state.add_from_download(AUTHCERT_5A23, &req, Some(&store));
        assert!(!outcome.unwrap()); // no error, but nothing changed.
        let missing2 = state.missing_docs();
        assert_eq!(missing, missing2); // No change.
        // The unsolicited cert must not have been written to the store.
        assert!(store
            .lock()
            .unwrap()
            .authcerts(&[authcert_id_5a23()])
            .unwrap()
            .is_empty());

        // Now try to add the other from a download ... for real!
        let mut req = tor_dirclient::request::AuthCertRequest::new();
        req.push(authcert_id_5a23()); // Right idea this time!
        let req = ClientRequest::AuthCert(req);
        let outcome = state.add_from_download(AUTHCERT_5A23, &req, Some(&store));
        assert!(outcome.unwrap()); // No error, _and_ something changed!
        let missing3 = state.missing_docs();
        assert!(missing3.is_empty());
        assert!(state.can_advance());
        // This time the cert should have been persisted.
        assert!(!store
            .lock()
            .unwrap()
            .authcerts(&[authcert_id_5a23()])
            .unwrap()
            .is_empty());

        let next = state.advance().unwrap();
        assert_eq!(
            &next.describe(),
            "Downloading microdescriptors (we are missing 6)."
        );

        // If we start from scratch and reset, we're back in GetConsensus.
        let (_rcv, state) = new_getcerts_state();
        let state = state.reset().unwrap();
        assert_eq!(&state.describe(), "Looking for a consensus.");

        // TODO: I'd like even more tests to make sure that we never
        // accept a certificate for an authority we don't believe in.
    }
1093
    #[test]
    fn get_microdescs_state() {
        // Exercise GetMicrodescsState: reset behavior, initial properties,
        // the missing-microdescriptor list, loading one microdesc from the
        // cache, and downloading the rest until the directory is usable.

        /// Construct a GetCertsState with our test data
        fn new_getmicrodescs_state() -> (Arc<DirRcv>, GetMicrodescsState<DirRcv>) {
            let rcv = Arc::new(DirRcv::new(test_time(), Some(test_authorities())));
            let (signed, rest, consensus) = MdConsensus::parse(CONSENSUS2).unwrap();
            // Skip timeliness/signature checks: this is canned test data.
            let consensus = consensus
                .dangerously_assume_timely()
                .dangerously_assume_wellsigned();
            let meta = ConsensusMeta::from_consensus(signed, rest, &consensus);
            let state = GetMicrodescsState::new(consensus, meta, Arc::downgrade(&rcv)).unwrap();

            (rcv, state)
        }
        /// Decode a base64-encoded microdescriptor digest.
        fn d64(s: &str) -> MdDigest {
            base64::decode(s).unwrap().try_into().unwrap()
        }

        // If we start from scratch and reset, we're back in GetConsensus.
        let (_rcv, state) = new_getmicrodescs_state();
        let state = Box::new(state).reset().unwrap();
        assert_eq!(&state.describe(), "Downloading a consensus.");

        // Check the basics.
        let (_rcv, mut state) = new_getmicrodescs_state();
        assert_eq!(
            &state.describe(),
            "Downloading microdescriptors (we are missing 4)."
        );
        assert!(!state.can_advance());
        assert!(!state.is_ready(Readiness::Complete));
        assert!(!state.is_ready(Readiness::Usable));
        {
            // The reset time should fall inside the consensus's
            // fresh-until..valid-until window.
            let reset_time = state.reset_time().unwrap();
            let fresh_until: SystemTime = datetime!(2021-10-27 21:27:00 UTC).into();
            let valid_until: SystemTime = datetime!(2021-10-27 21:27:20 UTC).into();
            assert!(reset_time >= fresh_until);
            assert!(reset_time <= valid_until);
        }
        let retry = state.dl_config().unwrap();
        assert_eq!(&retry, DownloadScheduleConfig::default().retry_microdescs());

        // Now check whether we're missing all the right microdescs.
        let missing = state.missing_docs();
        let md_text = microdescs();
        assert_eq!(missing.len(), 4);
        assert_eq!(md_text.len(), 4);
        let md1 = d64("LOXRj8YZP0kwpEAsYOvBZWZWGoWv5b/Bp2Mz2Us8d8g");
        let md2 = d64("iOhVp33NyZxMRDMHsVNq575rkpRViIJ9LN9yn++nPG0");
        let md3 = d64("/Cd07b3Bl0K0jX2/1cAvsYXJJMi5d8UBU+oWKaLxoGo");
        let md4 = d64("z+oOlR7Ga6cg9OoC/A3D3Ey9Rtc4OldhKlpQblMfQKo");
        for md_digest in [md1, md2, md3, md4] {
            assert!(missing.contains(&DocId::Microdesc(md_digest)));
            assert!(md_text.contains_key(&md_digest));
        }

        // Try adding a microdesc from the cache.
        let (_tempdir, store) = temp_store();
        let doc1: crate::storage::InputString = md_text.get(&md1).unwrap().clone().into();
        let docs = vec![(DocId::Microdesc(md1), doc1.into())]
            .into_iter()
            .collect();
        let outcome = state.add_from_cache(docs, Some(&store));
        assert!(outcome.unwrap()); // successfully loaded one MD.
        assert!(!state.can_advance());
        assert!(!state.is_ready(Readiness::Complete));
        assert!(!state.is_ready(Readiness::Usable));

        // Now we should be missing 3.
        let missing = state.missing_docs();
        assert_eq!(missing.len(), 3);
        assert!(!missing.contains(&DocId::Microdesc(md1)));

        // Try adding the rest as if from a download.
        let mut req = tor_dirclient::request::MicrodescRequest::new();
        let mut response = "".to_owned();
        for md_digest in [md2, md3, md4] {
            response.push_str(md_text.get(&md_digest).unwrap());
            req.push(md_digest);
        }
        let req = ClientRequest::Microdescs(req);
        let outcome = state.add_from_download(response.as_str(), &req, Some(&store));
        assert!(outcome.unwrap()); // successfully loaded MDs
        assert!(state.is_ready(Readiness::Complete));
        assert!(state.is_ready(Readiness::Usable));
        // All three downloaded microdescs should now be in the store.
        assert_eq!(
            store
                .lock()
                .unwrap()
                .microdescs(&[md2, md3, md4])
                .unwrap()
                .len(),
            3
        );

        let missing = state.missing_docs();
        assert!(missing.is_empty());
    }
1192 }
1193