1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 use std::collections::{HashMap, HashSet};
6
7 use crate::bso_record::EncryptedBso;
8 use crate::client::{SetupStorageClient, Sync15ClientResponse};
9 use crate::collection_keys::CollectionKeys;
10 use crate::error::{self, ErrorKind, ErrorResponse};
11 use crate::key_bundle::KeyBundle;
12 use crate::record_types::{MetaGlobalEngine, MetaGlobalRecord};
13 use crate::request::{InfoCollections, InfoConfiguration};
14 use crate::util::ServerTimestamp;
15 use interrupt_support::Interruptee;
16 use serde_derive::*;
17 use sync_guid::Guid;
18
19 use self::SetupState::*;
20
/// The Sync storage format version this client implements. A remote
/// `meta/global` with a higher `storage_version` aborts the sync with
/// `ClientUpgradeRequired`; a lower one triggers a fresh start (see `advance`).
const STORAGE_VERSION: usize = 5;
22
/// Maps names to storage versions for engines to include in a fresh
/// `meta/global` record. We include engines that we don't implement
/// because they'll be disabled on other clients if we omit them
/// (bug 1479929).
///
/// Each entry is `(engine name, engine storage version)`.
const DEFAULT_ENGINES: &[(&str, usize)] = &[
    ("passwords", 1),
    ("clients", 1),
    ("addons", 1),
    ("addresses", 1),
    ("bookmarks", 2),
    ("creditcards", 1),
    ("forms", 1),
    ("history", 1),
    ("prefs", 2),
    ("tabs", 1),
];
39
/// Declined engines to include in a fresh `meta/global` record.
/// Currently empty - we don't decline anything by default.
const DEFAULT_DECLINED: &[&str] = &[];
42
43 /// State that we require the app to persist to storage for us.
44 /// It's a little unfortunate we need this, because it's only tracking
45 /// "declined engines", and even then, only needed in practice when there's
46 /// no meta/global so we need to create one. It's extra unfortunate because we
47 /// want to move away from "globally declined" engines anyway, moving towards
48 /// allowing engines to be enabled or disabled per client rather than globally.
49 ///
50 /// Apps are expected to treat this as opaque, so we support serializing it.
51 /// Note that this structure is *not* used to *change* the declined engines
52 /// list - that will be done in the future, but the API exposed for that
53 /// purpose will also take a mutable PersistedGlobalState.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "schema_version")]
pub enum PersistedGlobalState {
    // Note: "V1" was when we persisted the entire GlobalState, keys and all!
    // That variant no longer exists; this comment is kept for historical
    // context (a regular comment so it doesn't attach to V2's docs).

    /// V2 is just tracking the globally declined list.
    /// None means "I've no idea" and theoretically should only happen on the
    /// very first sync for an app.
    V2 { declined: Option<Vec<String>> },
}
64
65 impl Default for PersistedGlobalState {
66 #[inline]
default() -> PersistedGlobalState67 fn default() -> PersistedGlobalState {
68 PersistedGlobalState::V2 { declined: None }
69 }
70 }
71
/// The engine-level actions required as a result of declined-engine
/// reconciliation (see `compute_engine_states`).
#[derive(Debug, Default, Clone, PartialEq)]
pub(crate) struct EngineChangesNeeded {
    // Engines newly declined (weren't in our local declined list before),
    // whose local state must be reset.
    pub local_resets: HashSet<String>,
    // Engines the user just disabled which still exist on the server, whose
    // server-side collections must be wiped.
    pub remote_wipes: HashSet<String>,
}
77
/// The engine-related state we read from the server: which collections
/// exist (from `info/collections`) and which engines `meta/global` says
/// are declined.
#[derive(Debug, Default, Clone, PartialEq)]
struct RemoteEngineState {
    info_collections: HashSet<String>,
    declined: HashSet<String>,
}
83
/// Everything `compute_engine_states` needs: our locally persisted declined
/// list, the server's view (if we have one), and any user-requested changes.
#[derive(Debug, Default, Clone, PartialEq)]
struct EngineStateInput {
    local_declined: HashSet<String>,
    // None when there's no remote state at all (e.g. a fresh start).
    remote: Option<RemoteEngineState>,
    // Engine name -> desired enabled state (true = enable, false = disable).
    user_changes: HashMap<String, bool>,
}
90
/// The result of `compute_engine_states`.
#[derive(Debug, Default, Clone, PartialEq)]
struct EngineStateOutput {
    // The new declined.
    declined: HashSet<String>,
    // Which engines need resets or wipes.
    changes_needed: EngineChangesNeeded,
}
98
compute_engine_states(input: EngineStateInput) -> EngineStateOutput99 fn compute_engine_states(input: EngineStateInput) -> EngineStateOutput {
100 use crate::util::*;
101 log::debug!("compute_engine_states: input {:?}", input);
102 let (must_enable, must_disable) = partition_by_value(&input.user_changes);
103 let have_remote = input.remote.is_some();
104 let RemoteEngineState {
105 info_collections,
106 declined: remote_declined,
107 } = input.remote.clone().unwrap_or_default();
108
109 let both_declined_and_remote = set_intersection(&info_collections, &remote_declined);
110 if !both_declined_and_remote.is_empty() {
111 // Should we wipe these too?
112 log::warn!(
113 "Remote state contains engines which are in both info/collections and meta/global's declined: {:?}",
114 both_declined_and_remote,
115 );
116 }
117
118 let most_recent_declined_list = if have_remote {
119 &remote_declined
120 } else {
121 &input.local_declined
122 };
123
124 let result_declined = set_difference(
125 &set_union(most_recent_declined_list, &must_disable),
126 &must_enable,
127 );
128
129 let output = EngineStateOutput {
130 changes_needed: EngineChangesNeeded {
131 // Anything now declined which wasn't in our declined list before gets a reset.
132 local_resets: set_difference(&result_declined, &input.local_declined),
133 // Anything remote that we just declined gets a wipe. In the future
134 // we might want to consider wiping things in both remote declined
135 // and info/collections, but we'll let other clients pick up their
136 // own mess for now.
137 remote_wipes: set_intersection(&info_collections, &must_disable),
138 },
139 declined: result_declined,
140 };
141 // No PII here and this helps debug problems.
142 log::debug!("compute_engine_states: output {:?}", output);
143 output
144 }
145
146 impl PersistedGlobalState {
set_declined(&mut self, new_declined: Vec<String>)147 fn set_declined(&mut self, new_declined: Vec<String>) {
148 match self {
149 Self::V2 { ref mut declined } => *declined = Some(new_declined),
150 }
151 }
get_declined(&self) -> &[String]152 pub(crate) fn get_declined(&self) -> &[String] {
153 match self {
154 Self::V2 { declined: Some(d) } => &d,
155 Self::V2 { declined: None } => &[],
156 }
157 }
158 }
159
/// Holds global Sync state, including server upload limits, the
/// last-fetched collection modified times, `meta/global` record, and
/// encrypted copies of the crypto/keys resource (which we hold as encrypted
/// both to avoid keeping them in memory longer than necessary, and to guard
/// against the wrong (ie, a different user's) root key being passed in).
#[derive(Debug, Clone)]
pub struct GlobalState {
    pub config: InfoConfiguration,
    pub collections: InfoCollections,
    pub global: MetaGlobalRecord,
    pub global_timestamp: ServerTimestamp,
    pub keys: EncryptedBso,
}
173
174 /// Creates a fresh `meta/global` record, using the default engine selections,
175 /// and declined engines from our PersistedGlobalState.
new_global(pgs: &PersistedGlobalState) -> error::Result<MetaGlobalRecord>176 fn new_global(pgs: &PersistedGlobalState) -> error::Result<MetaGlobalRecord> {
177 let sync_id = Guid::random();
178 let mut engines: HashMap<String, _> = HashMap::new();
179 for (name, version) in DEFAULT_ENGINES.iter() {
180 let sync_id = Guid::random();
181 engines.insert(
182 (*name).to_string(),
183 MetaGlobalEngine {
184 version: *version,
185 sync_id,
186 },
187 );
188 }
189 // We only need our PersistedGlobalState to fill out a new meta/global - if
190 // we previously saw a meta/global then we would have updated it with what
191 // it was at the time.
192 let declined = match pgs {
193 PersistedGlobalState::V2 { declined: Some(d) } => d.clone(),
194 _ => DEFAULT_DECLINED.iter().map(ToString::to_string).collect(),
195 };
196
197 Ok(MetaGlobalRecord {
198 sync_id,
199 storage_version: STORAGE_VERSION,
200 engines,
201 declined,
202 })
203 }
204
fixup_meta_global(global: &mut MetaGlobalRecord) -> bool205 fn fixup_meta_global(global: &mut MetaGlobalRecord) -> bool {
206 let mut changed_any = false;
207 for &(name, version) in DEFAULT_ENGINES.iter() {
208 let had_engine = global.engines.contains_key(name);
209 let should_have_engine = !global.declined.iter().any(|c| c == name);
210 if had_engine != should_have_engine {
211 if should_have_engine {
212 log::debug!("SyncID for engine {:?} was missing", name);
213 global.engines.insert(
214 name.to_string(),
215 MetaGlobalEngine {
216 version,
217 sync_id: Guid::random(),
218 },
219 );
220 } else {
221 log::debug!("SyncID for engine {:?} was present, but shouldn't be", name);
222 global.engines.remove(name);
223 }
224 changed_any = true;
225 }
226 }
227 changed_any
228 }
229
/// Drives the Sync setup process to the `Ready` state, fetching (and, when
/// allowed, repairing and re-uploading) `meta/global` and `crypto/keys`
/// along the way.
pub struct SetupStateMachine<'a> {
    client: &'a dyn SetupStorageClient,
    root_key: &'a KeyBundle,
    pgs: &'a mut PersistedGlobalState,
    // `allowed_states` is designed so that we can arrange for the concept of
    // a "fast" sync - so we decline to advance if we need to setup from scratch.
    // The idea is that if we need to sync before going to sleep we should do
    // it as fast as possible. However, in practice this isn't going to do
    // what we expect - a "fast sync" that finds lots to do is almost certainly
    // going to take longer than a "full sync" that finds nothing to do.
    // We should almost certainly remove this and instead allow for a "time
    // budget", after which we get interrupted. Later...
    allowed_states: Vec<&'static str>,
    // Labels of the states we've visited this run; used to detect cycles.
    sequence: Vec<&'static str>,
    // User-requested engine enable/disable changes, if any.
    engine_updates: Option<&'a HashMap<String, bool>>,
    interruptee: &'a dyn Interruptee,
    // Filled in once engine states are computed; read by the sync driver.
    pub(crate) changes_needed: Option<EngineChangesNeeded>,
}
248
249 impl<'a> SetupStateMachine<'a> {
250 /// Creates a state machine for a "classic" Sync 1.5 client that supports
251 /// all states, including uploading a fresh `meta/global` and `crypto/keys`
252 /// after a node reassignment.
for_full_sync( client: &'a dyn SetupStorageClient, root_key: &'a KeyBundle, pgs: &'a mut PersistedGlobalState, engine_updates: Option<&'a HashMap<String, bool>>, interruptee: &'a dyn Interruptee, ) -> SetupStateMachine<'a>253 pub fn for_full_sync(
254 client: &'a dyn SetupStorageClient,
255 root_key: &'a KeyBundle,
256 pgs: &'a mut PersistedGlobalState,
257 engine_updates: Option<&'a HashMap<String, bool>>,
258 interruptee: &'a dyn Interruptee,
259 ) -> SetupStateMachine<'a> {
260 SetupStateMachine::with_allowed_states(
261 client,
262 root_key,
263 pgs,
264 interruptee,
265 engine_updates,
266 vec![
267 "Initial",
268 "InitialWithConfig",
269 "InitialWithInfo",
270 "InitialWithMetaGlobal",
271 "Ready",
272 "FreshStartRequired",
273 "WithPreviousState",
274 ],
275 )
276 }
277
278 /// Creates a state machine for a fast sync, which only uses locally
279 /// cached global state, and bails if `meta/global` or `crypto/keys`
280 /// are missing or out-of-date. This is useful in cases where it's
281 /// important to get to ready as quickly as possible, like syncing before
282 /// sleep, or when conserving time or battery life.
for_fast_sync( client: &'a dyn SetupStorageClient, root_key: &'a KeyBundle, pgs: &'a mut PersistedGlobalState, engine_updates: Option<&'a HashMap<String, bool>>, interruptee: &'a dyn Interruptee, ) -> SetupStateMachine<'a>283 pub fn for_fast_sync(
284 client: &'a dyn SetupStorageClient,
285 root_key: &'a KeyBundle,
286 pgs: &'a mut PersistedGlobalState,
287 engine_updates: Option<&'a HashMap<String, bool>>,
288 interruptee: &'a dyn Interruptee,
289 ) -> SetupStateMachine<'a> {
290 SetupStateMachine::with_allowed_states(
291 client,
292 root_key,
293 pgs,
294 interruptee,
295 engine_updates,
296 vec!["Ready", "WithPreviousState"],
297 )
298 }
299
300 /// Creates a state machine for a read-only sync, where the client can't
301 /// upload `meta/global` or `crypto/keys`. Useful for clients that only
302 /// sync specific collections, like Lockbox.
for_readonly_sync( client: &'a dyn SetupStorageClient, root_key: &'a KeyBundle, pgs: &'a mut PersistedGlobalState, interruptee: &'a dyn Interruptee, ) -> SetupStateMachine<'a>303 pub fn for_readonly_sync(
304 client: &'a dyn SetupStorageClient,
305 root_key: &'a KeyBundle,
306 pgs: &'a mut PersistedGlobalState,
307 interruptee: &'a dyn Interruptee,
308 ) -> SetupStateMachine<'a> {
309 SetupStateMachine::with_allowed_states(
310 client,
311 root_key,
312 pgs,
313 interruptee,
314 // No engine updates for a readonly sync
315 None,
316 // We don't allow a FreshStart in a read-only sync.
317 vec![
318 "Initial",
319 "InitialWithConfig",
320 "InitialWithInfo",
321 "InitialWithMetaGlobal",
322 "Ready",
323 "WithPreviousState",
324 ],
325 )
326 }
327
with_allowed_states( client: &'a dyn SetupStorageClient, root_key: &'a KeyBundle, pgs: &'a mut PersistedGlobalState, interruptee: &'a dyn Interruptee, engine_updates: Option<&'a HashMap<String, bool>>, allowed_states: Vec<&'static str>, ) -> SetupStateMachine<'a>328 fn with_allowed_states(
329 client: &'a dyn SetupStorageClient,
330 root_key: &'a KeyBundle,
331 pgs: &'a mut PersistedGlobalState,
332 interruptee: &'a dyn Interruptee,
333 engine_updates: Option<&'a HashMap<String, bool>>,
334 allowed_states: Vec<&'static str>,
335 ) -> SetupStateMachine<'a> {
336 SetupStateMachine {
337 client,
338 root_key,
339 pgs,
340 sequence: Vec::new(),
341 allowed_states,
342 engine_updates,
343 interruptee,
344 changes_needed: None,
345 }
346 }
347
    /// Advances the state machine by one step from `from`, performing
    /// whatever network requests that state requires, and returns the next
    /// state (or an error, which bubbles up to `run_to_ready`).
    fn advance(&mut self, from: SetupState) -> error::Result<SetupState> {
        match from {
            // Fetch `info/configuration` with current server limits, and
            // `info/collections` with collection last modified times.
            Initial => {
                let config = match self.client.fetch_info_configuration()? {
                    Sync15ClientResponse::Success { record, .. } => record,
                    // A missing configuration record just means "use defaults".
                    Sync15ClientResponse::Error(ErrorResponse::NotFound { .. }) => {
                        InfoConfiguration::default()
                    }
                    other => return Err(other.create_storage_error().into()),
                };
                Ok(InitialWithConfig { config })
            }

            // XXX - we could consider combining these Initial* states, because we don't
            // attempt to support filling in "missing" global state - *any* 404 in them
            // means `FreshStart`.
            // IOW, in all cases, they either `Err()`, move to `FreshStartRequired`, or
            // advance to a specific next state.
            InitialWithConfig { config } => {
                match self.client.fetch_info_collections()? {
                    Sync15ClientResponse::Success {
                        record: collections,
                        ..
                    } => Ok(InitialWithInfo {
                        config,
                        collections,
                    }),
                    // If the server doesn't have `info/collections`, start over
                    // and reupload our `meta/global` and `crypto/keys`.
                    Sync15ClientResponse::Error(ErrorResponse::NotFound { .. }) => {
                        Ok(FreshStartRequired { config })
                    }
                    other => Err(other.create_storage_error().into()),
                }
            }

            InitialWithInfo {
                config,
                collections,
            } => {
                match self.client.fetch_meta_global()? {
                    Sync15ClientResponse::Success {
                        record: mut global,
                        last_modified: mut global_timestamp,
                        ..
                    } => {
                        // If the server has a newer storage version, we can't
                        // sync until our client is updated.
                        if global.storage_version > STORAGE_VERSION {
                            return Err(ErrorKind::ClientUpgradeRequired.into());
                        }

                        // If the server has an older storage version, wipe and
                        // reupload.
                        if global.storage_version < STORAGE_VERSION {
                            Ok(FreshStartRequired { config })
                        } else {
                            log::info!("Have info/collections and meta/global. Computing new engine states");
                            let initial_global_declined: HashSet<String> =
                                global.declined.iter().cloned().collect();
                            let result = compute_engine_states(EngineStateInput {
                                local_declined: self.pgs.get_declined().iter().cloned().collect(),
                                user_changes: self.engine_updates.cloned().unwrap_or_default(),
                                remote: Some(RemoteEngineState {
                                    declined: initial_global_declined.clone(),
                                    info_collections: collections.keys().cloned().collect(),
                                }),
                            });
                            // Persist the new declined.
                            self.pgs
                                .set_declined(result.declined.iter().cloned().collect());
                            // If the declined engines differ from remote, fix that.
                            let fixed_declined = if result.declined != initial_global_declined {
                                global.declined = result.declined.iter().cloned().collect();
                                log::info!(
                                    "Uploading new declined {:?} to meta/global with timestamp {:?}",
                                    global.declined,
                                    global_timestamp,
                                );
                                true
                            } else {
                                false
                            };
                            // If there are missing syncIds, we need to fix those as well
                            let fixed_ids = if fixup_meta_global(&mut global) {
                                log::info!(
                                    "Uploading corrected meta/global with timestamp {:?}",
                                    global_timestamp,
                                );
                                true
                            } else {
                                false
                            };

                            // One PUT covers both kinds of fix-up; the server
                            // hands back the record's new timestamp.
                            if fixed_declined || fixed_ids {
                                global_timestamp =
                                    self.client.put_meta_global(global_timestamp, &global)?;
                                log::debug!("new global_timestamp: {:?}", global_timestamp);
                            }
                            // Update the set of changes needed.
                            if self.changes_needed.is_some() {
                                // Should never happen (we prevent state machine
                                // loops elsewhere) but if it did, the info is stale
                                // anyway.
                                log::warn!("Already have a set of changes needed, Overwriting...");
                            }
                            self.changes_needed = Some(result.changes_needed);
                            Ok(InitialWithMetaGlobal {
                                config,
                                collections,
                                global,
                                global_timestamp,
                            })
                        }
                    }
                    Sync15ClientResponse::Error(ErrorResponse::NotFound { .. }) => {
                        Ok(FreshStartRequired { config })
                    }
                    other => Err(other.create_storage_error().into()),
                }
            }

            InitialWithMetaGlobal {
                config,
                collections,
                global,
                global_timestamp,
            } => {
                // Now try and get keys etc - if we fresh-start we'll re-use declined.
                match self.client.fetch_crypto_keys()? {
                    Sync15ClientResponse::Success {
                        record,
                        last_modified,
                        ..
                    } => {
                        // Note that collection/keys is itself a bso, so the
                        // json body also carries the timestamp. If they aren't
                        // identical something has screwed up and we should die.
                        assert_eq!(last_modified, record.modified);
                        let state = GlobalState {
                            config,
                            collections,
                            global,
                            global_timestamp,
                            keys: record,
                        };
                        Ok(Ready { state })
                    }
                    // If the server doesn't have a `crypto/keys`, start over
                    // and reupload our `meta/global` and `crypto/keys`.
                    Sync15ClientResponse::Error(ErrorResponse::NotFound { .. }) => {
                        Ok(FreshStartRequired { config })
                    }
                    other => Err(other.create_storage_error().into()),
                }
            }

            // We've got old state that's likely to be OK.
            // We keep things simple here - if there's evidence of a new/missing
            // meta/global or new/missing keys we just restart from scratch.
            WithPreviousState { old_state } => match self.client.fetch_info_collections()? {
                Sync15ClientResponse::Success {
                    record: collections,
                    ..
                } => Ok(
                    // Reuse the cached state only when the user made no engine
                    // changes and both meta/global and crypto/keys timestamps
                    // exactly match the server's.
                    if self.engine_updates.is_none()
                        && is_same_timestamp(old_state.global_timestamp, &collections, "meta")
                        && is_same_timestamp(old_state.keys.modified, &collections, "crypto")
                    {
                        Ready {
                            state: GlobalState {
                                collections,
                                ..old_state
                            },
                        }
                    } else {
                        InitialWithConfig {
                            config: old_state.config,
                        }
                    },
                ),
                // Any problem fetching info/collections: fall back to a full
                // (but not fresh) start.
                _ => Ok(InitialWithConfig {
                    config: old_state.config,
                }),
            },

            Ready { state } => Ok(Ready { state }),

            FreshStartRequired { config } => {
                // Wipe the server.
                log::info!("Fresh start: wiping remote");
                self.client.wipe_all_remote()?;

                // Upload a fresh `meta/global`...
                log::info!("Uploading meta/global");
                let computed = compute_engine_states(EngineStateInput {
                    local_declined: self.pgs.get_declined().iter().cloned().collect(),
                    user_changes: self.engine_updates.cloned().unwrap_or_default(),
                    remote: None,
                });
                self.pgs
                    .set_declined(computed.declined.iter().cloned().collect());

                self.changes_needed = Some(computed.changes_needed);

                let new_global = new_global(self.pgs)?;

                self.client
                    .put_meta_global(ServerTimestamp::default(), &new_global)?;

                // ...And a fresh `crypto/keys`.
                let new_keys = CollectionKeys::new_random()?.to_encrypted_bso(&self.root_key)?;
                self.client
                    .put_crypto_keys(ServerTimestamp::default(), &new_keys)?;

                // TODO(lina): Can we pass along server timestamps from the PUTs
                // above, and avoid re-fetching the `m/g` and `c/k` we just
                // uploaded?
                // OTOH(mark): restarting the state machine keeps life simple and rare.
                Ok(InitialWithConfig { config })
            }
        }
    }
573
    /// Runs through the state machine to the ready state.
    ///
    /// Starts from `WithPreviousState` when cached `state` is supplied,
    /// otherwise from `Initial`, looping until `Ready` (whose `GlobalState`
    /// is returned) or an error: interruption, a state-machine cycle
    /// (`SetupRace`), or a state outside `allowed_states` (`SetupRequired`).
    pub fn run_to_ready(&mut self, state: Option<GlobalState>) -> error::Result<GlobalState> {
        let mut s = match state {
            Some(old_state) => WithPreviousState { old_state },
            None => Initial,
        };
        loop {
            self.interruptee.err_if_interrupted()?;
            let label = &s.label();
            log::trace!("global state: {:?}", label);
            match s {
                Ready { state } => {
                    self.sequence.push(label);
                    return Ok(state);
                }
                // If we already started over once before, we're likely in a
                // cycle, and should try again later. Intermediate states
                // aren't a problem, just the initial ones.
                FreshStartRequired { .. } | WithPreviousState { .. } | Initial => {
                    if self.sequence.contains(&label) {
                        // Is this really the correct error?
                        return Err(ErrorKind::SetupRace.into());
                    }
                }
                _ => {
                    if !self.allowed_states.contains(&label) {
                        return Err(ErrorKind::SetupRequired.into());
                    }
                }
            };
            self.sequence.push(label);
            s = self.advance(s)?;
        }
    }
608 }
609
/// States in the remote setup process.
/// TODO(lina): Add link once #56 is merged.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum SetupState {
    // These "Initial" states are only ever used when starting from scratch.
    Initial,
    InitialWithConfig {
        config: InfoConfiguration,
    },
    InitialWithInfo {
        config: InfoConfiguration,
        collections: InfoCollections,
    },
    InitialWithMetaGlobal {
        config: InfoConfiguration,
        collections: InfoCollections,
        global: MetaGlobalRecord,
        global_timestamp: ServerTimestamp,
    },
    // We have a cached GlobalState from a previous sync we may be able to reuse.
    WithPreviousState {
        old_state: GlobalState,
    },
    // Terminal success state: everything needed to sync is in hand.
    Ready {
        state: GlobalState,
    },
    // The server must be wiped and fresh meta/global + crypto/keys uploaded.
    FreshStartRequired {
        config: InfoConfiguration,
    },
}
640
641 impl SetupState {
label(&self) -> &'static str642 fn label(&self) -> &'static str {
643 match self {
644 Initial { .. } => "Initial",
645 InitialWithConfig { .. } => "InitialWithConfig",
646 InitialWithInfo { .. } => "InitialWithInfo",
647 InitialWithMetaGlobal { .. } => "InitialWithMetaGlobal",
648 Ready { .. } => "Ready",
649 WithPreviousState { .. } => "WithPreviousState",
650 FreshStartRequired { .. } => "FreshStartRequired",
651 }
652 }
653 }
654
655 /// Whether we should skip fetching an item. Used when we already have timestamps
656 /// and want to check if we should reuse our existing state. The state's fairly
657 /// cheap to recreate and very bad to use if it is wrong, so we insist on the
658 /// *exact* timestamp matching and not a simple "later than" check.
is_same_timestamp(local: ServerTimestamp, collections: &InfoCollections, key: &str) -> bool659 fn is_same_timestamp(local: ServerTimestamp, collections: &InfoCollections, key: &str) -> bool {
660 collections.get(key).map_or(false, |ts| local == *ts)
661 }
662
663 #[cfg(test)]
664 mod tests {
665 use super::*;
666
667 use crate::bso_record::{BsoRecord, EncryptedBso, EncryptedPayload, Payload};
668 use crate::record_types::CryptoKeysRecord;
669 use interrupt_support::NeverInterrupts;
670
    /// A fake `SetupStorageClient` whose responses are all canned in advance.
    /// An `Err` in any field is served back as a 500 `ServerError` response.
    struct InMemoryClient {
        info_configuration: error::Result<Sync15ClientResponse<InfoConfiguration>>,
        info_collections: error::Result<Sync15ClientResponse<InfoCollections>>,
        meta_global: error::Result<Sync15ClientResponse<MetaGlobalRecord>>,
        crypto_keys: error::Result<Sync15ClientResponse<BsoRecord<EncryptedPayload>>>,
    }
677
678 impl SetupStorageClient for InMemoryClient {
fetch_info_configuration( &self, ) -> error::Result<Sync15ClientResponse<InfoConfiguration>>679 fn fetch_info_configuration(
680 &self,
681 ) -> error::Result<Sync15ClientResponse<InfoConfiguration>> {
682 match &self.info_configuration {
683 Ok(client_response) => Ok(client_response.clone()),
684 Err(_) => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
685 status: 500,
686 route: "test/path".into(),
687 })),
688 }
689 }
690
fetch_info_collections(&self) -> error::Result<Sync15ClientResponse<InfoCollections>>691 fn fetch_info_collections(&self) -> error::Result<Sync15ClientResponse<InfoCollections>> {
692 match &self.info_collections {
693 Ok(collections) => Ok(collections.clone()),
694 Err(_) => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
695 status: 500,
696 route: "test/path".into(),
697 })),
698 }
699 }
700
fetch_meta_global(&self) -> error::Result<Sync15ClientResponse<MetaGlobalRecord>>701 fn fetch_meta_global(&self) -> error::Result<Sync15ClientResponse<MetaGlobalRecord>> {
702 match &self.meta_global {
703 Ok(global) => Ok(global.clone()),
704 // TODO(lina): Special handling for 404s, we want to ensure we
705 // handle missing keys and other server errors correctly.
706 Err(_) => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
707 status: 500,
708 route: "test/path".into(),
709 })),
710 }
711 }
712
put_meta_global( &self, xius: ServerTimestamp, global: &MetaGlobalRecord, ) -> error::Result<ServerTimestamp>713 fn put_meta_global(
714 &self,
715 xius: ServerTimestamp,
716 global: &MetaGlobalRecord,
717 ) -> error::Result<ServerTimestamp> {
718 // Ensure that the meta/global record we uploaded is "fixed up"
719 assert!(DEFAULT_ENGINES
720 .iter()
721 .filter(|e| e.0 != "logins")
722 .all(|&(k, _v)| global.engines.contains_key(k)));
723 assert!(!global.engines.contains_key("logins"));
724 assert_eq!(global.declined, vec!["logins".to_string()]);
725 // return a different timestamp.
726 Ok(ServerTimestamp(xius.0 + 1))
727 }
728
fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<EncryptedBso>>729 fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<EncryptedBso>> {
730 match &self.crypto_keys {
731 Ok(keys) => Ok(keys.clone()),
732 // TODO(lina): Same as above, for 404s.
733 Err(_) => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
734 status: 500,
735 route: "test/path".into(),
736 })),
737 }
738 }
739
put_crypto_keys( &self, xius: ServerTimestamp, _keys: &EncryptedBso, ) -> error::Result<()>740 fn put_crypto_keys(
741 &self,
742 xius: ServerTimestamp,
743 _keys: &EncryptedBso,
744 ) -> error::Result<()> {
745 assert_eq!(xius, ServerTimestamp(888_800));
746 Err(ErrorKind::StorageHttpError(ErrorResponse::ServerError {
747 status: 500,
748 route: "crypto/keys".to_string(),
749 })
750 .into())
751 }
752
wipe_all_remote(&self) -> error::Result<()>753 fn wipe_all_remote(&self) -> error::Result<()> {
754 Ok(())
755 }
756 }
757
mocked_success_ts<T>(t: T, ts: i64) -> error::Result<Sync15ClientResponse<T>>758 fn mocked_success_ts<T>(t: T, ts: i64) -> error::Result<Sync15ClientResponse<T>> {
759 Ok(Sync15ClientResponse::Success {
760 status: 200,
761 record: t,
762 last_modified: ServerTimestamp(ts),
763 route: "test/path".into(),
764 })
765 }
766
    /// Builds a canned successful response carrying `t` with a zero timestamp.
    fn mocked_success<T>(t: T) -> error::Result<Sync15ClientResponse<T>> {
        mocked_success_ts(t, 0)
    }
770
771 // for tests, we want a BSO with a specific timestamp, which we never
772 // need in non-test-code as the timestamp comes from the server.
773 impl CollectionKeys {
to_encrypted_bso_with_timestamp( &self, root_key: &KeyBundle, modified: ServerTimestamp, ) -> error::Result<EncryptedBso>774 pub fn to_encrypted_bso_with_timestamp(
775 &self,
776 root_key: &KeyBundle,
777 modified: ServerTimestamp,
778 ) -> error::Result<EncryptedBso> {
779 let record = CryptoKeysRecord {
780 id: "keys".into(),
781 collection: "crypto".into(),
782 default: self.default.to_b64_array(),
783 collections: self
784 .collections
785 .iter()
786 .map(|kv| (kv.0.clone(), kv.1.to_b64_array()))
787 .collect(),
788 };
789 let mut bso =
790 crate::CleartextBso::from_payload(Payload::from_record(record)?, "crypto");
791 bso.modified = modified;
792 Ok(bso.encrypt(root_key)?)
793 }
794 }
795
    #[test]
    fn test_state_machine_ready_from_empty() {
        let root_key = KeyBundle::new_random().unwrap();
        let keys = CollectionKeys {
            timestamp: ServerTimestamp(123_400),
            default: KeyBundle::new_random().unwrap(),
            collections: HashMap::new(),
        };
        // Server-side meta/global with only "bookmarks"; the state machine is
        // expected to fix up the missing default engines (checked by
        // InMemoryClient::put_meta_global) before re-uploading.
        let mg = MetaGlobalRecord {
            sync_id: "syncIDAAAAAA".into(),
            storage_version: 5usize,
            engines: vec![(
                "bookmarks",
                MetaGlobalEngine {
                    version: 1usize,
                    sync_id: "syncIDBBBBBB".into(),
                },
            )]
            .into_iter()
            .map(|(key, value)| (key.to_owned(), value))
            .collect(),
            // We ensure that the record we upload doesn't have a logins record.
            declined: vec!["logins".to_string()],
        };
        let client = InMemoryClient {
            info_configuration: mocked_success(InfoConfiguration::default()),
            info_collections: mocked_success(InfoCollections::new(
                vec![("meta", 123_456), ("crypto", 145_000)]
                    .into_iter()
                    .map(|(key, value)| (key.to_owned(), ServerTimestamp(value)))
                    .collect(),
            )),
            meta_global: mocked_success_ts(mg, 999_000),
            crypto_keys: mocked_success_ts(
                keys.to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(888_000))
                    .expect("should always work in this test"),
                888_000,
            ),
        };
        let mut pgs = PersistedGlobalState::V2 { declined: None };

        let mut state_machine =
            SetupStateMachine::for_full_sync(&client, &root_key, &mut pgs, None, &NeverInterrupts);
        assert!(
            state_machine.run_to_ready(None).is_ok(),
            "Should drive state machine to ready"
        );
        // With no cached state, we must walk through every Initial* state.
        assert_eq!(
            state_machine.sequence,
            vec![
                "Initial",
                "InitialWithConfig",
                "InitialWithInfo",
                "InitialWithMetaGlobal",
                "Ready",
            ],
            "Should cycle through all states"
        );
    }
855
856 #[test]
test_from_previous_state_declined()857 fn test_from_previous_state_declined() {
858 let _ = env_logger::try_init();
859 // The state-machine sequence where we didn't use the previous state
860 // (ie, where the state machine restarted)
861 let sm_seq_restarted = vec![
862 "WithPreviousState",
863 "InitialWithConfig",
864 "InitialWithInfo",
865 "InitialWithMetaGlobal",
866 "Ready",
867 ];
868 // The state-machine sequence where we used the previous state.
869 let sm_seq_used_previous = vec!["WithPreviousState", "Ready"];
870
871 // do the actual test.
872 fn do_test(
873 client: &dyn SetupStorageClient,
874 root_key: &KeyBundle,
875 mut pgs: &mut PersistedGlobalState,
876 engine_updates: Option<&HashMap<String, bool>>,
877 old_state: GlobalState,
878 expected_states: &[&str],
879 ) {
880 let mut state_machine = SetupStateMachine::for_full_sync(
881 client,
882 root_key,
883 &mut pgs,
884 engine_updates,
885 &NeverInterrupts,
886 );
887 assert!(
888 state_machine.run_to_ready(Some(old_state)).is_ok(),
889 "Should drive state machine to ready"
890 );
891 assert_eq!(state_machine.sequence, expected_states);
892 }
893
894 // and all the complicated setup...
895 let ts_metaglobal = 123_456;
896 let ts_keys = 145_000;
897 let root_key = KeyBundle::new_random().unwrap();
898 let keys = CollectionKeys {
899 timestamp: ServerTimestamp(ts_keys + 1),
900 default: KeyBundle::new_random().unwrap(),
901 collections: HashMap::new(),
902 };
903 let mg = MetaGlobalRecord {
904 sync_id: "syncIDAAAAAA".into(),
905 storage_version: 5usize,
906 engines: vec![(
907 "bookmarks",
908 MetaGlobalEngine {
909 version: 1usize,
910 sync_id: "syncIDBBBBBB".into(),
911 },
912 )]
913 .into_iter()
914 .map(|(key, value)| (key.to_owned(), value))
915 .collect(),
916 // We ensure that the record we upload doesn't have a logins record.
917 declined: vec!["logins".to_string()],
918 };
919 let collections = InfoCollections::new(
920 vec![("meta", ts_metaglobal), ("crypto", ts_keys)]
921 .into_iter()
922 .map(|(key, value)| (key.to_owned(), ServerTimestamp(value)))
923 .collect(),
924 );
925 let client = InMemoryClient {
926 info_configuration: mocked_success(InfoConfiguration::default()),
927 info_collections: mocked_success(collections.clone()),
928 meta_global: mocked_success_ts(mg.clone(), ts_metaglobal),
929 crypto_keys: mocked_success_ts(
930 keys.to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(ts_keys))
931 .expect("should always work in this test"),
932 ts_keys,
933 ),
934 };
935
936 // First a test where the "previous" global state is OK to reuse.
937 {
938 let mut pgs = PersistedGlobalState::V2 { declined: None };
939 // A "previous" global state.
940 let old_state = GlobalState {
941 config: InfoConfiguration::default(),
942 collections: collections.clone(),
943 global: mg.clone(),
944 global_timestamp: ServerTimestamp(ts_metaglobal),
945 keys: keys
946 .to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(ts_keys))
947 .expect("should always work in this test"),
948 };
949 do_test(
950 &client,
951 &root_key,
952 &mut pgs,
953 None,
954 old_state,
955 &sm_seq_used_previous,
956 );
957 }
958
959 // Now where the meta/global record on the server is later.
960 {
961 let mut pgs = PersistedGlobalState::V2 { declined: None };
962 // A "previous" global state.
963 let old_state = GlobalState {
964 config: InfoConfiguration::default(),
965 collections: collections.clone(),
966 global: mg.clone(),
967 global_timestamp: ServerTimestamp(999_999),
968 keys: keys
969 .to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(ts_keys))
970 .expect("should always work in this test"),
971 };
972 do_test(
973 &client,
974 &root_key,
975 &mut pgs,
976 None,
977 old_state,
978 &sm_seq_restarted,
979 );
980 }
981
982 // Where keys on the server is later.
983 {
984 let mut pgs = PersistedGlobalState::V2 { declined: None };
985 // A "previous" global state.
986 let old_state = GlobalState {
987 config: InfoConfiguration::default(),
988 collections: collections.clone(),
989 global: mg.clone(),
990 global_timestamp: ServerTimestamp(ts_metaglobal),
991 keys: keys
992 .to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(999_999))
993 .expect("should always work in this test"),
994 };
995 do_test(
996 &client,
997 &root_key,
998 &mut pgs,
999 None,
1000 old_state,
1001 &sm_seq_restarted,
1002 );
1003 }
1004
1005 // Where there are engine-state changes.
1006 {
1007 let mut pgs = PersistedGlobalState::V2 { declined: None };
1008 // A "previous" global state.
1009 let old_state = GlobalState {
1010 config: InfoConfiguration::default(),
1011 collections,
1012 global: mg,
1013 global_timestamp: ServerTimestamp(ts_metaglobal),
1014 keys: keys
1015 .to_encrypted_bso_with_timestamp(&root_key, ServerTimestamp(ts_keys))
1016 .expect("should always work in this test"),
1017 };
1018 let mut engine_updates = HashMap::<String, bool>::new();
1019 engine_updates.insert("logins".to_string(), false);
1020 do_test(
1021 &client,
1022 &root_key,
1023 &mut pgs,
1024 Some(&engine_updates),
1025 old_state,
1026 &sm_seq_restarted,
1027 );
1028 let declined = match pgs {
1029 PersistedGlobalState::V2 { declined: d } => d,
1030 };
1031 // and check we now consider logins as declined.
1032 assert_eq!(declined, Some(vec!["logins".to_string()]));
1033 }
1034 }
1035
string_set(s: &[&str]) -> HashSet<String>1036 fn string_set(s: &[&str]) -> HashSet<String> {
1037 s.iter().map(ToString::to_string).collect()
1038 }
string_map<T: Clone>(s: &[(&str, T)]) -> HashMap<String, T>1039 fn string_map<T: Clone>(s: &[(&str, T)]) -> HashMap<String, T> {
1040 s.iter().map(|v| (v.0.to_string(), v.1.clone())).collect()
1041 }
    #[test]
    // Exercises `compute_engine_states` across combinations of locally-declined
    // engines, the remote declined list / info_collections, and user changes.
    fn test_engine_states() {
        // No remote state at all: the local declined list stands as-is.
        assert_eq!(
            compute_engine_states(EngineStateInput {
                local_declined: string_set(&["foo", "bar"]),
                remote: None,
                user_changes: Default::default(),
            }),
            EngineStateOutput {
                declined: string_set(&["foo", "bar"]),
                // No wipes, no resets
                changes_needed: Default::default(),
            }
        );
        // Remote only declines `foo`, and `bar` exists in info/collections.
        assert_eq!(
            compute_engine_states(EngineStateInput {
                local_declined: string_set(&["foo", "bar"]),
                remote: Some(RemoteEngineState {
                    declined: string_set(&["foo"]),
                    info_collections: string_set(&["bar"])
                }),
                user_changes: Default::default(),
            }),
            EngineStateOutput {
                // The remote declined list wins: `bar` is no longer declined,
                // leaving only `foo`.
                declined: string_set(&["foo"]),
                // No wipes, no resets, should just be a local update.
                changes_needed: Default::default(),
            }
        );
        // Remote declines an engine (`quux`) we didn't have declined locally.
        assert_eq!(
            compute_engine_states(EngineStateInput {
                local_declined: string_set(&["foo", "bar"]),
                remote: Some(RemoteEngineState {
                    declined: string_set(&["foo", "bar", "quux"]),
                    info_collections: string_set(&[])
                }),
                user_changes: Default::default(),
            }),
            EngineStateOutput {
                // `quux` is newly declined via the remote list.
                declined: string_set(&["foo", "bar", "quux"]),
                changes_needed: EngineChangesNeeded {
                    // Should reset `quux`.
                    local_resets: string_set(&["quux"]),
                    // No wipes, though.
                    remote_wipes: string_set(&[]),
                }
            }
        );
        assert_eq!(
            compute_engine_states(EngineStateInput {
                local_declined: string_set(&["bar", "baz"]),
                remote: Some(RemoteEngineState {
                    declined: string_set(&["bar", "baz",]),
                    info_collections: string_set(&["quux"])
                }),
                // Change a declined engine to undeclined.
                user_changes: string_map(&[("bar", true)]),
            }),
            EngineStateOutput {
                declined: string_set(&["baz"]),
                // No wipes, just undecline it.
                changes_needed: Default::default()
            }
        );
        assert_eq!(
            compute_engine_states(EngineStateInput {
                local_declined: string_set(&["bar", "baz"]),
                remote: Some(RemoteEngineState {
                    declined: string_set(&["bar", "baz"]),
                    info_collections: string_set(&["foo"])
                }),
                // Change an engine which exists remotely to declined.
                user_changes: string_map(&[("foo", false)]),
            }),
            EngineStateOutput {
                declined: string_set(&["baz", "bar", "foo"]),
                // Declining an engine that exists remotely needs both a local
                // reset and a server wipe.
                changes_needed: EngineChangesNeeded {
                    // Should reset our local foo
                    local_resets: string_set(&["foo"]),
                    // And wipe the server.
                    remote_wipes: string_set(&["foo"]),
                }
            }
        );
    }
1130 }
1131