1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5 use crate::error::*;
6 use crate::schema;
7 use rusqlite::types::{FromSql, ToSql};
8 use rusqlite::Connection;
9 use rusqlite::OpenFlags;
10 use sql_support::{ConnExt, SqlInterruptHandle, SqlInterruptScope};
11 use std::fs;
12 use std::ops::{Deref, DerefMut};
13 use std::path::{Path, PathBuf};
14 use std::result;
15 use std::sync::{atomic::AtomicUsize, Arc};
16 use url::Url;
17
/// A `StorageDb` wraps a read-write SQLite connection, and handles schema
/// migrations and recovering from database file corruption. It can be used
/// anywhere a `rusqlite::Connection` is expected, thanks to its `Deref{Mut}`
/// implementations.
///
/// We only support a single writer connection - so that's the only thing we
/// store. It's still a bit overkill, but there's only so many yaks in a day.
pub struct StorageDb {
    // The single read-write connection to the underlying SQLite database.
    writer: Connection,
    // Shared counter cloned into every `SqlInterruptHandle` and
    // `SqlInterruptScope` we hand out, so an interrupt can flag all
    // currently-active scopes.
    interrupt_counter: Arc<AtomicUsize>,
}
29 impl StorageDb {
30 /// Create a new, or fetch an already open, StorageDb backed by a file on disk.
new(db_path: impl AsRef<Path>) -> Result<Self>31 pub fn new(db_path: impl AsRef<Path>) -> Result<Self> {
32 let db_path = normalize_path(db_path)?;
33 Self::new_named(db_path)
34 }
35
36 /// Create a new, or fetch an already open, memory-based StorageDb. You must
37 /// provide a name, but you are still able to have a single writer and many
38 /// reader connections to the same memory DB open.
39 #[cfg(test)]
new_memory(db_path: &str) -> Result<Self>40 pub fn new_memory(db_path: &str) -> Result<Self> {
41 let name = PathBuf::from(format!("file:{}?mode=memory&cache=shared", db_path));
42 Self::new_named(name)
43 }
44
new_named(db_path: PathBuf) -> Result<Self>45 fn new_named(db_path: PathBuf) -> Result<Self> {
46 // We always create the read-write connection for an initial open so
47 // we can create the schema and/or do version upgrades.
48 let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX
49 | OpenFlags::SQLITE_OPEN_URI
50 | OpenFlags::SQLITE_OPEN_CREATE
51 | OpenFlags::SQLITE_OPEN_READ_WRITE;
52
53 let conn = Connection::open_with_flags(db_path.clone(), flags)?;
54 match init_sql_connection(&conn, true) {
55 Ok(()) => Ok(Self {
56 writer: conn,
57 interrupt_counter: Arc::new(AtomicUsize::new(0)),
58 }),
59 Err(e) => {
60 // like with places, failure to upgrade means "you lose your data"
61 if let ErrorKind::DatabaseUpgradeError = e.kind() {
62 fs::remove_file(&db_path)?;
63 Self::new_named(db_path)
64 } else {
65 Err(e)
66 }
67 }
68 }
69 }
70
71 /// Returns an interrupt handle for this database connection. This handle
72 /// should be handed out to consumers that want to interrupt long-running
73 /// operations. It's FFI-safe, and `Send + Sync`, since it only makes sense
74 /// to use from another thread. Calling `interrupt` on the handle sets a
75 /// flag on all currently active interrupt scopes.
interrupt_handle(&self) -> SqlInterruptHandle76 pub fn interrupt_handle(&self) -> SqlInterruptHandle {
77 SqlInterruptHandle::new(
78 self.writer.get_interrupt_handle(),
79 self.interrupt_counter.clone(),
80 )
81 }
82
83 /// Creates an object that knows when it's been interrupted. A new interrupt
84 /// scope should be created inside each method that does long-running
85 /// database work, like batch writes. This is the other side of a
86 /// `SqlInterruptHandle`: when a handle is interrupted, it flags all active
87 /// interrupt scopes as interrupted, too, so that they can abort pending
88 /// work as soon as possible.
89 #[allow(dead_code)]
begin_interrupt_scope(&self) -> SqlInterruptScope90 pub fn begin_interrupt_scope(&self) -> SqlInterruptScope {
91 SqlInterruptScope::new(self.interrupt_counter.clone())
92 }
93
94 /// Closes the database connection. If there are any unfinalized prepared
95 /// statements on the connection, `close` will fail and the `StorageDb` will
96 /// be returned to the caller so that it can retry, drop (via `mem::drop`)
97 // or leak (`mem::forget`) the connection.
98 ///
99 /// Keep in mind that dropping the connection tries to close it again, and
100 /// panics on error.
close(self) -> result::Result<(), (StorageDb, Error)>101 pub fn close(self) -> result::Result<(), (StorageDb, Error)> {
102 let StorageDb {
103 writer,
104 interrupt_counter,
105 } = self;
106 writer.close().map_err(|(writer, err)| {
107 (
108 StorageDb {
109 writer,
110 interrupt_counter,
111 },
112 err.into(),
113 )
114 })
115 }
116 }
117
// Dereferencing a `StorageDb` yields the underlying writer connection, so it
// can be passed anywhere a `&rusqlite::Connection` is expected.
impl Deref for StorageDb {
    type Target = Connection;

    fn deref(&self) -> &Self::Target {
        &self.writer
    }
}
125
// Mutable counterpart of the `Deref` impl above: hands out `&mut Connection`.
impl DerefMut for StorageDb {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.writer
    }
}
131
init_sql_connection(conn: &Connection, is_writable: bool) -> Result<()>132 fn init_sql_connection(conn: &Connection, is_writable: bool) -> Result<()> {
133 let initial_pragmas = "
134 -- We don't care about temp tables being persisted to disk.
135 PRAGMA temp_store = 2;
136 -- we unconditionally want write-ahead-logging mode
137 PRAGMA journal_mode=WAL;
138 -- foreign keys seem worth enforcing!
139 PRAGMA foreign_keys = ON;
140 ";
141
142 conn.execute_batch(initial_pragmas)?;
143 define_functions(&conn)?;
144 conn.set_prepared_statement_cache_capacity(128);
145 if is_writable {
146 let tx = conn.unchecked_transaction()?;
147 schema::init(&conn)?;
148 tx.commit()?;
149 };
150 Ok(())
151 }
152
define_functions(c: &Connection) -> Result<()>153 fn define_functions(c: &Connection) -> Result<()> {
154 use rusqlite::functions::FunctionFlags;
155 c.create_scalar_function(
156 "generate_guid",
157 0,
158 FunctionFlags::SQLITE_UTF8,
159 sql_fns::generate_guid,
160 )?;
161 Ok(())
162 }
163
// Implementations backing the custom SQL functions registered in
// `define_functions`.
pub(crate) mod sql_fns {
    use rusqlite::{functions::Context, Result};
    use sync_guid::Guid as SyncGuid;

    // Scalar SQL function: returns a fresh random GUID on every call.
    // NOTE(review): `#[inline(never)]` - presumably to keep this call out of
    // line for code-size or profiling reasons; confirm the original motivation.
    #[inline(never)]
    pub fn generate_guid(_ctx: &Context<'_>) -> Result<SyncGuid> {
        Ok(SyncGuid::random())
    }
}
173
174 // These should be somewhere else...
put_meta(db: &Connection, key: &str, value: &dyn ToSql) -> Result<()>175 pub fn put_meta(db: &Connection, key: &str, value: &dyn ToSql) -> Result<()> {
176 db.conn().execute_named_cached(
177 "REPLACE INTO meta (key, value) VALUES (:key, :value)",
178 &[(":key", &key), (":value", value)],
179 )?;
180 Ok(())
181 }
182
get_meta<T: FromSql>(db: &Connection, key: &str) -> Result<Option<T>>183 pub fn get_meta<T: FromSql>(db: &Connection, key: &str) -> Result<Option<T>> {
184 let res = db.conn().try_query_one(
185 "SELECT value FROM meta WHERE key = :key",
186 &[(":key", &key)],
187 true,
188 )?;
189 Ok(res)
190 }
191
delete_meta(db: &Connection, key: &str) -> Result<()>192 pub fn delete_meta(db: &Connection, key: &str) -> Result<()> {
193 db.conn()
194 .execute_named_cached("DELETE FROM meta WHERE key = :key", &[(":key", &key)])?;
195 Ok(())
196 }
197
198 // Utilities for working with paths.
// (From places_utils - ideally these would be shared, but the use of
// ErrorKind values makes that non-trivial.)
201
202 /// `Path` is basically just a `str` with no validation, and so in practice it
203 /// could contain a file URL. Rusqlite takes advantage of this a bit, and says
204 /// `AsRef<Path>` but really means "anything sqlite can take as an argument".
205 ///
206 /// Swift loves using file urls (the only support it has for file manipulation
207 /// is through file urls), so it's handy to support them if possible.
unurl_path(p: impl AsRef<Path>) -> PathBuf208 fn unurl_path(p: impl AsRef<Path>) -> PathBuf {
209 p.as_ref()
210 .to_str()
211 .and_then(|s| Url::parse(s).ok())
212 .and_then(|u| {
213 if u.scheme() == "file" {
214 u.to_file_path().ok()
215 } else {
216 None
217 }
218 })
219 .unwrap_or_else(|| p.as_ref().to_owned())
220 }
221
222 /// If `p` is a file URL, return it, otherwise try and make it one.
223 ///
224 /// Errors if `p` is a relative non-url path, or if it's a URL path
225 /// that's isn't a `file:` URL.
226 #[allow(dead_code)]
ensure_url_path(p: impl AsRef<Path>) -> Result<Url>227 pub fn ensure_url_path(p: impl AsRef<Path>) -> Result<Url> {
228 if let Some(u) = p.as_ref().to_str().and_then(|s| Url::parse(s).ok()) {
229 if u.scheme() == "file" {
230 Ok(u)
231 } else {
232 Err(ErrorKind::IllegalDatabasePath(p.as_ref().to_owned()).into())
233 }
234 } else {
235 let p = p.as_ref();
236 let u = Url::from_file_path(p).map_err(|_| ErrorKind::IllegalDatabasePath(p.to_owned()))?;
237 Ok(u)
238 }
239 }
240
241 /// As best as possible, convert `p` into an absolute path, resolving
242 /// all symlinks along the way.
243 ///
244 /// If `p` is a file url, it's converted to a path before this.
normalize_path(p: impl AsRef<Path>) -> Result<PathBuf>245 fn normalize_path(p: impl AsRef<Path>) -> Result<PathBuf> {
246 let path = unurl_path(p);
247 if let Ok(canonical) = path.canonicalize() {
248 return Ok(canonical);
249 }
250 // It probably doesn't exist yet. This is an error, although it seems to
251 // work on some systems.
252 //
253 // We resolve this by trying to canonicalize the parent directory, and
254 // appending the requested file name onto that. If we can't canonicalize
255 // the parent, we return an error.
256 //
257 // Also, we return errors if the path ends in "..", if there is no
258 // parent directory, etc.
259 let file_name = path
260 .file_name()
261 .ok_or_else(|| ErrorKind::IllegalDatabasePath(path.clone()))?;
262
263 let parent = path
264 .parent()
265 .ok_or_else(|| ErrorKind::IllegalDatabasePath(path.clone()))?;
266
267 let mut canonical = parent.canonicalize()?;
268 canonical.push(file_name);
269 Ok(canonical)
270 }
271
272 // Helpers for tests
#[cfg(test)]
pub mod test {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Monotonic counter so each test gets a uniquely-named in-memory DB.
    static ATOMIC_COUNTER: AtomicUsize = AtomicUsize::new(0);

    /// Create a fresh, uniquely-named in-memory `StorageDb` for a test.
    pub fn new_mem_db() -> StorageDb {
        let _ = env_logger::try_init();
        let next = ATOMIC_COUNTER.fetch_add(1, Ordering::Relaxed);
        let name = format!("test-api-{}", next);
        StorageDb::new_memory(&name).expect("should get an API")
    }
}
287
#[cfg(test)]
mod tests {
    use super::test::*;
    use super::*;

    // Sanity check that we can create a database.
    #[test]
    fn test_open() {
        new_mem_db();
        // XXX - should we check anything else? Seems a bit pointless, but if
        // we move the meta functions away from here then it's better than
        // nothing.
    }

    // Round-trip a value through put/get/delete on the meta table.
    #[test]
    fn test_meta() -> Result<()> {
        let db = new_mem_db();
        // A key we never wrote reads back as None.
        assert_eq!(get_meta::<String>(&db, "foo")?, None);
        // Writing then reading returns the stored value.
        put_meta(&db, "foo", &"bar".to_string())?;
        assert_eq!(get_meta(&db, "foo")?, Some("bar".to_string()));
        // Deleting makes the key absent again.
        delete_meta(&db, "foo")?;
        assert_eq!(get_meta::<String>(&db, "foo")?, None);
        Ok(())
    }
}
313