1/* This Source Code Form is subject to the terms of the Mozilla Public 2 * License, v. 2.0. If a copy of the MPL was not distributed with this 3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 5/* This file looks to Myk Melez <myk@mozilla.org>'s Mozilla Labs snowl 6 * project's (https://hg.mozilla.org/labs/snowl/) modules/GlodaDatastore.jsm 7 * for inspiration and idioms (and also a name :). 8 */ 9 10const EXPORTED_SYMBOLS = ["GlodaDatastore"]; 11 12const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm"); 13 14const { 15 GlodaAttributeDBDef, 16 GlodaConversation, 17 GlodaFolder, 18 GlodaMessage, 19 GlodaContact, 20 GlodaIdentity, 21} = ChromeUtils.import("resource:///modules/gloda/GlodaDataModel.jsm"); 22const { GlodaDatabind } = ChromeUtils.import( 23 "resource:///modules/gloda/GlodaDatabind.jsm" 24); 25const { GlodaCollection, GlodaCollectionManager } = ChromeUtils.import( 26 "resource:///modules/gloda/Collection.jsm" 27); 28 29var MIN_CACHE_SIZE = 8 * 1048576; 30var MAX_CACHE_SIZE = 64 * 1048576; 31var MEMSIZE_FALLBACK_BYTES = 256 * 1048576; 32 33var PCH_LOG = console.createInstance({ 34 prefix: "gloda.ds.pch", 35 maxLogLevel: "Warn", 36 maxLogLevelPref: "gloda.loglevel", 37}); 38 39/** 40 * Commit async handler; hands off the notification to 41 * |GlodaDatastore._asyncCompleted|. 
 */
/**
 * @param {Function[]} aCallbacks - Callbacks to invoke once the commit
 *     completes successfully; each is invoked with no arguments.
 */
function PostCommitHandler(aCallbacks) {
  this.callbacks = aCallbacks;
  // Track this statement so datastore shutdown can wait for it to complete.
  GlodaDatastore._pendingAsyncStatements++;
}

PostCommitHandler.prototype = {
  // A COMMIT produces no rows; nothing to do here.
  handleResult(aResultSet) {},

  handleError(aError) {
    PCH_LOG.error("database error:" + aError);
  },

  handleCompletion(aReason) {
    // just outright bail if we are shutdown
    if (GlodaDatastore.datastoreIsShutdown) {
      return;
    }

    // Only fire the callbacks if the commit actually finished (as opposed to
    // being canceled or erroring out).
    if (aReason == Ci.mozIStorageStatementCallback.REASON_FINISHED) {
      for (let callback of this.callbacks) {
        try {
          callback();
        } catch (ex) {
          // A misbehaving callback must not prevent the remaining callbacks
          // (or the _asyncCompleted bookkeeping below) from running.
          PCH_LOG.error(
            "PostCommitHandler callback (" +
              ex.fileName +
              ":" +
              ex.lineNumber +
              ") threw: " +
              ex
          );
        }
      }
    }
    try {
      GlodaDatastore._asyncCompleted();
    } catch (e) {
      PCH_LOG.error("Exception in handleCompletion:", e);
    }
  },
};

var QFQ_LOG = console.createInstance({
  prefix: "gloda.ds.qfq",
  maxLogLevel: "Warn",
  maxLogLevelPref: "gloda.loglevel",
});

/**
 * Singleton collection listener used by |QueryFromQueryCallback| to assist in
 * the loading of referenced noun instances.  Which is to say, messages have
 * identities (specific e-mail addresses) associated with them via attributes.
 * And these identities in turn reference / are referenced by contacts (the
 * notion of a person).
 *
 * This listener is primarily concerned with fixing up the references in each
 * noun instance to its referenced instances once they have been loaded.  It
 * also deals with caching so that our identity invariant is maintained: user
 * code should only ever see one distinct instance of a thing at a time.
 */
var QueryFromQueryResolver = {
  onItemsAdded(aIgnoredItems, aCollection, aFake) {
    // The "origin" collection is the one whose deferred loads we are helping
    // satisfy; it rides along either on a stack (when several loads are in
    // flight) or directly on |data|.
    let originColl = aCollection.dataStack
      ? aCollection.dataStack.pop()
      : aCollection.data;
    // QFQ_LOG.debug("QFQR: originColl: " + originColl);
    // Remember the origin so onQueryCompleted (which fires later, in the same
    // order) can find it again.
    if (aCollection.completionShifter) {
      aCollection.completionShifter.push(originColl);
    } else {
      aCollection.completionShifter = [originColl];
    }

    // aFake is true when invoked synthetically by QueryFromQueryCallback
    // (no real sub-query completed), in which case the counters were never
    // incremented for this call.
    if (!aFake) {
      originColl.deferredCount--;
      originColl.resolvedCount++;
    }

    // bail if we are still pending on some other load completion
    if (originColl.deferredCount > 0) {
      // QFQ_LOG.debug("QFQR: bailing " + originColl._nounDef.name);
      return;
    }

    let referencesByNounID = originColl.masterCollection.referencesByNounID;
    let inverseReferencesByNounID =
      originColl.masterCollection.inverseReferencesByNounID;

    if (originColl.pendingItems) {
      for (let item of originColl.pendingItems) {
        // QFQ_LOG.debug("QFQR: loading deferred " + item.NOUN_ID + ":" + item.id);
        // All referenced nouns are now in the reference maps; stitch the
        // actual object references onto each pending item.
        GlodaDatastore.loadNounDeferredDeps(
          item,
          referencesByNounID,
          inverseReferencesByNounID
        );
      }

      // we need to consider the possibility that we are racing a collection very
      // much like our own.  as such, this means we need to perform cache
      // unification as our last step.
      GlodaCollectionManager.cacheLoadUnify(
        originColl._nounDef.id,
        originColl.pendingItems,
        false
      );

      // just directly tell the collection about the items.  we know the query
      // matches (at least until we introduce predicates that we cannot express
      // in SQL.)
      // QFQ_LOG.debug("  QFQR: about to trigger listener: " + originColl._listener +
      //    "with collection: " + originColl._nounDef.name);
      originColl._onItemsAdded(originColl.pendingItems);
      delete originColl.pendingItems;
      delete originColl._pendingIdMap;
    }
  },
  onItemsModified() {},
  onItemsRemoved() {},
  onQueryCompleted(aCollection) {
    // Pull the origin collection stashed by the matching onItemsAdded call.
    let originColl = aCollection.completionShifter
      ? aCollection.completionShifter.shift()
      : aCollection.data;
    // QFQ_LOG.debug("  QFQR about to trigger completion with collection: " +
    //   originColl._nounDef.name);
    if (originColl.deferredCount <= 0) {
      originColl._onQueryCompleted();
    }
  },
};

/**
 * Handles the results from a GlodaDatastore.queryFromQuery call in cooperation
 * with the |QueryFromQueryResolver| collection listener.  We do a lot of
 * legwork related to satisfying references to other noun instances on the
 * noun instances the user directly queried.  Messages reference identities
 * reference contacts which in turn (implicitly) reference identities again.
 * We have to spin up those other queries and stitch things together.
 *
 * While the code is generally up to the existing set of tasks it is called to
 * handle, I would not be surprised for it to fall down if things get more
 * complex.  Some of the logic here 'evolved' a bit and could benefit from
 * additional documentation and a fresh go-through.
 *
 * @param {mozIStorageAsyncStatement} aStatement - The executing statement
 *     whose results we consume; finalized in handleCompletion.
 * @param {Object} aNounDef - Noun definition for the rows being loaded.
 * @param {GlodaCollection} aCollection - The collection the loaded items are
 *     destined for.
 */
function QueryFromQueryCallback(aStatement, aNounDef, aCollection) {
  this.statement = aStatement;
  this.nounDef = aNounDef;
  this.collection = aCollection;

  // QFQ_LOG.debug("Creating QFQCallback for noun: " + aNounDef.name);

  // the master collection holds the referencesByNounID
  this.referencesByNounID = {};
  this.masterReferencesByNounID = this.collection.masterCollection.referencesByNounID;
  this.inverseReferencesByNounID = {};
  this.masterInverseReferencesByNounID = this.collection.masterCollection.inverseReferencesByNounID;
  // we need to contribute our references as we load things; we need this
  // because of the potential for circular dependencies and our inability to
  // put things into the caching layer (or collection's _idMap) until we have
  // fully resolved things.
  if (this.nounDef.id in this.masterReferencesByNounID) {
    this.selfReferences = this.masterReferencesByNounID[this.nounDef.id];
  } else {
    this.selfReferences = this.masterReferencesByNounID[this.nounDef.id] = {};
  }
  // Only nouns with a parent column track parent -> children mappings.
  if (this.nounDef.parentColumnAttr) {
    if (this.nounDef.id in this.masterInverseReferencesByNounID) {
      this.selfInverseReferences = this.masterInverseReferencesByNounID[
        this.nounDef.id
      ];
    } else {
      this.selfInverseReferences = this.masterInverseReferencesByNounID[
        this.nounDef.id
      ] = {};
    }
  }

  // Set to true by handleResult if any loaded item has reference dependencies
  // that require follow-up sub-queries in handleCompletion.
  this.needsLoads = false;

  GlodaDatastore._pendingAsyncStatements++;
}

QueryFromQueryCallback.prototype = {
  handleResult(aResultSet) {
    try {
      // just outright bail if we are shutdown
      if (GlodaDatastore.datastoreIsShutdown) {
        return;
      }

      let pendingItems = this.collection.pendingItems;
      let pendingIdMap = this.collection._pendingIdMap;
      let row;
      let nounDef = this.nounDef;
      let nounID = nounDef.id;
      while ((row = aResultSet.getNextRow())) {
        let item = nounDef.objFromRow.call(nounDef.datastore, row);
        if (this.collection.stashedColumns) {
          let stashed = (this.collection.stashedColumns[item.id] = []);
          for (let iCol of this.collection.query.options.stashColumns) {
            stashed.push(GlodaDatastore._getVariant(row, iCol));
          }
        }
        // try and replace the item with one from the cache, if we can
        let cachedItem = GlodaCollectionManager.cacheLookupOne(
          nounID,
          item.id,
          false
        );

        // if we already have a copy in the pending id map, skip it
        if (item.id in pendingIdMap) {
          continue;
        }

        // QFQ_LOG.debug("loading item " + nounDef.id + ":" + item.id + " existing: " +
        //    this.selfReferences[item.id] + " cached: " + cachedItem);
        if (cachedItem) {
          item = cachedItem;
        } else if (this.selfReferences[item.id] != null) {
          // We may already have been loaded by this process.
          item = this.selfReferences[item.id];
        } else {
          // Perform loading logic which may produce reference dependencies.
          this.needsLoads =
            GlodaDatastore.loadNounItem(
              item,
              this.referencesByNounID,
              this.inverseReferencesByNounID
            ) || this.needsLoads;
        }

        // add ourself to the references by our id
        // QFQ_LOG.debug("saving item " + nounDef.id + ":" + item.id + " to self-refs");
        this.selfReferences[item.id] = item;

        // if we're tracking it, add ourselves to our parent's list of children
        // too
        if (this.selfInverseReferences) {
          let parentID = item[nounDef.parentColumnAttr.idStorageAttributeName];
          let childrenList = this.selfInverseReferences[parentID];
          if (childrenList === undefined) {
            childrenList = this.selfInverseReferences[parentID] = [];
          }
          childrenList.push(item);
        }

        pendingItems.push(item);
        pendingIdMap[item.id] = item;
      }
    } catch (e) {
      GlodaDatastore._log.error("Exception in handleResult:", e);
    }
  },

  handleError(aError) {
    GlodaDatastore._log.error(
      "Async queryFromQuery error: " + aError.result + ": " + aError.message
    );
  },

  handleCompletion(aReason) {
    try {
      try {
        this.statement.finalize();
        this.statement = null;

        // just outright bail if we are shutdown
        if (GlodaDatastore.datastoreIsShutdown) {
          return;
        }

        // QFQ_LOG.debug("handleCompletion: " + this.collection._nounDef.name);

        if (this.needsLoads) {
          // Issue sub-queries for every noun type our loaded items reference
          // that we could not fully satisfy from the cache.
          for (let nounID in this.referencesByNounID) {
            let references = this.referencesByNounID[nounID];
            // Our own noun type is handled by selfReferences, not here.
            if (nounID == this.nounDef.id) {
              continue;
            }
            let nounDef = GlodaDatastore._nounIDToDef[nounID];
            // QFQ_LOG.debug("  have references for noun: " + nounDef.name);
            // try and load them out of the cache/existing collections.  items in the
            // cache will be fully formed, which is nice for us.
            // XXX this mechanism will get dubious when we have multiple paths to a
            //  single noun-type.  For example, a -> b -> c, a-> c; two paths to c
            //  and we're looking at issuing two requests to c, the latter of which
            //  will be a superset of the first one.  This does not currently pose
            //  a problem because we only have a -> b -> c -> b, and sequential
            //  processing means no alarms and no surprises.
            let masterReferences = this.masterReferencesByNounID[nounID];
            if (masterReferences === undefined) {
              masterReferences = this.masterReferencesByNounID[nounID] = {};
            }
            let outReferences;
            // For nouns with a parent we need the cache hits separately so we
            // can wire up the inverse (parent -> children) references below.
            if (nounDef.parentColumnAttr) {
              outReferences = {};
            } else {
              outReferences = masterReferences;
            }
            let [
              ,
              notFoundCount,
              notFound,
            ] = GlodaCollectionManager.cacheLookupMany(
              nounDef.id,
              references,
              outReferences
            );

            if (nounDef.parentColumnAttr) {
              let inverseReferences;
              if (nounDef.id in this.masterInverseReferencesByNounID) {
                inverseReferences = this.masterInverseReferencesByNounID[
                  nounDef.id
                ];
              } else {
                inverseReferences = this.masterInverseReferencesByNounID[
                  nounDef.id
                ] = {};
              }

              for (let key in outReferences) {
                let item = outReferences[key];
                masterReferences[item.id] = item;
                let parentID =
                  item[nounDef.parentColumnAttr.idStorageAttributeName];
                let childrenList = inverseReferences[parentID];
                if (childrenList === undefined) {
                  childrenList = inverseReferences[parentID] = [];
                }
                childrenList.push(item);
              }
            }

            // QFQ_LOG.debug("  found: " + foundCount + " not found: " + notFoundCount);
            if (notFoundCount === 0) {
              this.collection.resolvedCount++;
            } else {
              // Cache misses remain; spin up a sub-query by id for them.
              this.collection.deferredCount++;
              let query = new nounDef.queryClass();
              query.id.apply(query, Object.keys(notFound));

              // we fully expect/allow for there being no such subcollection yet.
              let subCollection =
                nounDef.id in this.collection.masterCollection.subCollections
                  ? this.collection.masterCollection.subCollections[nounDef.id]
                  : undefined;
              this.collection.masterCollection.subCollections[
                nounDef.id
              ] = GlodaDatastore.queryFromQuery(
                query,
                QueryFromQueryResolver,
                this.collection,
                subCollection,
                this.collection.masterCollection,
                { becomeExplicit: true }
              );
            }
          }

          // Inverse references (children keyed by parent) always require a
          // sub-query constrained on the parent column.
          for (let nounID in this.inverseReferencesByNounID) {
            let inverseReferences = this.inverseReferencesByNounID[nounID];
            this.collection.deferredCount++;
            let nounDef = GlodaDatastore._nounIDToDef[nounID];

            // QFQ_LOG.debug("Want to load inverse via " + nounDef.parentColumnAttr.boundName);

            let query = new nounDef.queryClass();
            // we want to constrain using the parent column
            let queryConstrainer = query[nounDef.parentColumnAttr.boundName];
            queryConstrainer.apply(query, Object.keys(inverseReferences));
            // we fully expect/allow for there being no such subcollection yet.
            let subCollection =
              nounDef.id in this.collection.masterCollection.subCollections
                ? this.collection.masterCollection.subCollections[nounDef.id]
                : undefined;
            this.collection.masterCollection.subCollections[
              nounDef.id
            ] = GlodaDatastore.queryFromQuery(
              query,
              QueryFromQueryResolver,
              this.collection,
              subCollection,
              this.collection.masterCollection,
              { becomeExplicit: true }
            );
          }
        } else {
          // Nothing to defer; this load is fully resolved.
          this.collection.deferredCount--;
          this.collection.resolvedCount++;
        }

        // QFQ_LOG.debug("  defer: " + this.collection.deferredCount +
        //   " resolved: " + this.collection.resolvedCount);

        // process immediately and kick-up to the master collection...
        if (this.collection.deferredCount <= 0) {
          // this guy will resolve everyone using referencesByNounID and issue the
          // call to this.collection._onItemsAdded to propagate things to the
          // next concerned subCollection or the actual listener if this is the
          // master collection.  (Also, call _onQueryCompleted).
          QueryFromQueryResolver.onItemsAdded(
            null,
            { data: this.collection },
            true
          );
          QueryFromQueryResolver.onQueryCompleted({ data: this.collection });
        }
      } catch (e) {
        Cu.reportError(e);
        QFQ_LOG.error("Exception:", e);
      }
    } finally {
      GlodaDatastore._asyncCompleted();
    }
  },
};

/**
 * Used by |GlodaDatastore.folderCompactionPassBlockFetch| to accumulate the
 * results and pass them back in to the compaction process in
 * |GlodaMsgIndexer._worker_folderCompactionPass|.
 *
 * @param {Function} aCallback - Invoked on completion with the accumulated
 *     array of [id, messageKey, headerMessageID] triples.
 */
function CompactionBlockFetcherHandler(aCallback) {
  this.callback = aCallback;
  this.idsAndMessageKeys = [];
  GlodaDatastore._pendingAsyncStatements++;
}
CompactionBlockFetcherHandler.prototype = {
  handleResult(aResultSet) {
    let row;
    while ((row = aResultSet.getNextRow())) {
      this.idsAndMessageKeys.push([
        row.getInt64(0), // id
        row.getInt64(1), // messageKey
        row.getString(2), // headerMessageID
      ]);
    }
  },
  handleError(aError) {
    GlodaDatastore._log.error(
      "CompactionBlockFetcherHandler error: " +
        aError.result +
        ": " +
        aError.message
    );
  },
  handleCompletion(aReason) {
    GlodaDatastore._asyncCompleted();
    this.callback(this.idsAndMessageKeys);
  },
};

/**
 * Use this as the callback handler when you have a SQL query that returns a
 * single row with a single integer column value, like a COUNT() query.
499 */ 500function SingletonResultValueHandler(aCallback) { 501 this.callback = aCallback; 502 this.result = null; 503 GlodaDatastore._pendingAsyncStatements++; 504} 505SingletonResultValueHandler.prototype = { 506 handleResult(aResultSet) { 507 let row; 508 while ((row = aResultSet.getNextRow())) { 509 this.result = row.getInt64(0); 510 } 511 }, 512 handleError(aError) { 513 GlodaDatastore._log.error( 514 "SingletonResultValueHandler error: " + 515 aError.result + 516 ": " + 517 aError.message 518 ); 519 }, 520 handleCompletion(aReason) { 521 GlodaDatastore._asyncCompleted(); 522 this.callback(this.result); 523 }, 524}; 525 526/** 527 * Wrapper that duplicates actions taken on a real statement to an explain 528 * statement. Currently only fires an explain statement once. 529 */ 530function ExplainedStatementWrapper( 531 aRealStatement, 532 aExplainStatement, 533 aSQLString, 534 aExplainHandler 535) { 536 this.real = aRealStatement; 537 this.explain = aExplainStatement; 538 this.sqlString = aSQLString; 539 this.explainHandler = aExplainHandler; 540 this.done = false; 541} 542ExplainedStatementWrapper.prototype = { 543 bindByIndex(aColIndex, aValue) { 544 this.real.bindByIndex(aColIndex, aValue); 545 if (!this.done) { 546 this.explain.bindByIndex(aColIndex, aValue); 547 } 548 }, 549 executeAsync(aCallback) { 550 if (!this.done) { 551 this.explainHandler.sqlEnRoute(this.sqlString); 552 this.explain.executeAsync(this.explainHandler); 553 this.explain.finalize(); 554 this.done = true; 555 } 556 return this.real.executeAsync(aCallback); 557 }, 558 finalize() { 559 if (!this.done) { 560 this.explain.finalize(); 561 } 562 this.real.finalize(); 563 }, 564}; 565 566/** 567 * Writes a single JSON document to the provide file path in a streaming 568 * fashion. At startup we open an array to place the queries in and at 569 * shutdown we close it. 
570 */ 571function ExplainedStatementProcessor(aDumpPath) { 572 Services.obs.addObserver(this, "quit-application"); 573 574 this._sqlStack = []; 575 this._curOps = []; 576 this._objsWritten = 0; 577 578 let filePath = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile); 579 filePath.initWithPath(aDumpPath); 580 581 this._ostream = Cc[ 582 "@mozilla.org/network/file-output-stream;1" 583 ].createInstance(Ci.nsIFileOutputStream); 584 this._ostream.init(filePath, -1, -1, 0); 585 586 let s = '{"queries": ['; 587 this._ostream.write(s, s.length); 588} 589ExplainedStatementProcessor.prototype = { 590 sqlEnRoute(aSQLString) { 591 this._sqlStack.push(aSQLString); 592 }, 593 handleResult(aResultSet) { 594 let row; 595 // addr opcode (s) p1 p2 p3 p4 (s) p5 comment (s) 596 while ((row = aResultSet.getNextRow())) { 597 this._curOps.push([ 598 row.getInt64(0), // addr 599 row.getString(1), // opcode 600 row.getInt64(2), // p1 601 row.getInt64(3), // p2 602 row.getInt64(4), // p3 603 row.getString(5), // p4 604 row.getString(6), // p5 605 row.getString(7), // comment 606 ]); 607 } 608 }, 609 handleError(aError) { 610 Cu.reportError("Unexpected error in EXPLAIN handler: " + aError); 611 }, 612 handleCompletion(aReason) { 613 let obj = { 614 sql: this._sqlStack.shift(), 615 operations: this._curOps, 616 }; 617 let s = (this._objsWritten++ ? 
", " : "") + JSON.stringify(obj, null, 2); 618 this._ostream.write(s, s.length); 619 620 this._curOps = []; 621 }, 622 623 observe(aSubject, aTopic, aData) { 624 if (aTopic == "quit-application") { 625 this.shutdown(); 626 } 627 }, 628 629 shutdown() { 630 let s = "]}"; 631 this._ostream.write(s, s.length); 632 this._ostream.close(); 633 634 Services.obs.removeObserver(this, "quit-application"); 635 }, 636}; 637 638// See the documentation on GlodaDatastore._schemaVersion to understand these: 639var DB_SCHEMA_ACCEPT_LEAVE_LOW = 31, 640 DB_SCHEMA_ACCEPT_LEAVE_HIGH = 34, 641 DB_SCHEMA_ACCEPT_DOWNGRADE_LOW = 35, 642 DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH = 39, 643 DB_SCHEMA_DOWNGRADE_DELTA = 5; 644 645/** 646 * Database abstraction layer. Contains explicit SQL schemas for our 647 * fundamental representations (core 'nouns', if you will) as well as 648 * specialized functions for then dealing with each type of object. At the 649 * same time, we are beginning to support extension-provided tables, which 650 * call into question whether we really need our hand-rolled code, or could 651 * simply improve the extension-provided table case to work for most of our 652 * hand-rolled cases. 653 * For now, the argument can probably be made that our explicit schemas and code 654 * is readable/intuitive (not magic) and efficient (although generic stuff 655 * could also be made efficient, if slightly evil through use of eval or some 656 * other code generation mechanism.) 657 * 658 * === Data Model Interaction / Dependencies 659 * 660 * Dependent on and assumes limited knowledge of the GlodaDataModel.jsm 661 * implementations. GlodaDataModel.jsm actually has an implicit dependency on 662 * our implementation, reaching back into the datastore via the _datastore 663 * attribute which we pass into every instance we create. 
 * We pass a reference to ourselves as we create the GlodaDataModel.jsm instances
 * (and they store it as _datastore) because of a half-implemented attempt to
 * make it possible to live in a world where we have multiple datastores.  This
 * would be desirable in the cases where we are dealing with multiple SQLite
 * databases.  This could be because of per-account global databases or
 * some other segmentation.  This was abandoned when the importance of
 * per-account databases was diminished following public discussion, at least
 * for the short-term, but no attempt was made to excise the feature or
 * preclude it.  (Merely a recognition that it's too much to try and implement
 * correctly right now, especially because our solution might just be another
 * (aggregating) layer on top of things, rather than complicating the lower
 * levels.)
 *
 * === Object Identity / Caching
 *
 * The issue of object identity is handled by integration with the Collection.jsm
 * provided GlodaCollectionManager.  By "Object Identity", I mean that we only
 * should ever have one object instance alive at a time that corresponds to
 * an underlying database row in the database.  Where possible we avoid
 * performing database look-ups when we can check if the object is already
 * present in memory; in practice, this means when we are asking for an object
 * by ID.  When we cannot avoid a database query, we attempt to make sure that
 * we do not return a duplicate object instance, instead replacing it with the
 * 'live' copy of the object.  (Ideally, we would avoid any redundant
 * construction costs, but that is not currently the case.)
 * Although you should consult the GlodaCollectionManager for details, the
 * general idea is that we have 'collections' which represent views of the
 * database (based on a query) which use a single mechanism for double duty.
692 * The collections are registered with the collection manager via weak 693 * reference. The first 'duty' is that since the collections may be desired 694 * to be 'live views' of the data, we want them to update as changes occur. 695 * The weak reference allows the collection manager to track the 'live' 696 * collections and update them. The second 'duty' is the caching/object 697 * identity duty. In theory, every live item should be referenced by at least 698 * one collection, making it reachable for object identity/caching purposes. 699 * There is also an explicit (inclusive) caching layer present to both try and 700 * avoid poor performance from some of the costs of this strategy, as well as 701 * to try and keep track of objects that are being worked with that are not 702 * (yet) tracked by a collection. Using a size-bounded cache is clearly not 703 * a guarantee of correctness for this, but is suspected will work quite well. 704 * (Well enough to be dangerous because the inevitable failure case will not be 705 * expected.) 706 * 707 * The current strategy may not be the optimal one, feel free to propose and/or 708 * implement better ones, especially if you have numbers. 709 * The current strategy is not fully implemented in this file, but the common 710 * cases are believed to be covered. (Namely, we fail to purge items from the 711 * cache as they are purged from the database.) 712 * 713 * === Things That May Not Be Obvious (Gotchas) 714 * 715 * Although the schema includes "triggers", they are currently not used 716 * and were added when thinking about implementing the feature. We will 717 * probably implement this feature at some point, which is why they are still 718 * in there. 719 * 720 * We, and the layers above us, are not sufficiently thorough at cleaning out 721 * data from the database, and may potentially orphan it _as new functionality 722 * is added in the future at layers above us_. 
That is, currently we should 723 * not be leaking database rows, but we may in the future. This is because 724 * we/the layers above us lack a mechanism to track dependencies based on 725 * attributes. Say a plugin exists that extracts recipes from messages and 726 * relates them via an attribute. To do so, it must create new recipe rows 727 * in its own table as new recipes are discovered. No automatic mechanism 728 * will purge recipes as their source messages are purged, nor does any 729 * event-driven mechanism explicitly inform the plugin. (It could infer 730 * such an event from the indexing/attribute-providing process, or poll the 731 * states of attributes to accomplish this, but that is not desirable.) This 732 * needs to be addressed, and may be best addressed at layers above 733 * GlodaDatastore.jsm. 734 * @namespace 735 */ 736var GlodaDatastore = { 737 _log: null, 738 739 /* see Gloda's documentation for these constants */ 740 kSpecialNotAtAll: 0, 741 kSpecialColumn: 16, 742 kSpecialColumnChildren: 16 | 1, 743 kSpecialColumnParent: 16 | 2, 744 kSpecialString: 32, 745 kSpecialFulltext: 64, 746 IGNORE_FACET: {}, 747 748 kConstraintIdIn: 0, 749 kConstraintIn: 1, 750 kConstraintRanges: 2, 751 kConstraintEquals: 3, 752 kConstraintStringLike: 4, 753 kConstraintFulltext: 5, 754 755 /* ******************* SCHEMA ******************* */ 756 757 /** 758 * Schema version policy. IMPORTANT! We expect the following potential things 759 * to happen in the life of gloda that can impact our schema and the ability 760 * to move between different versions of Thunderbird: 761 * 762 * - Fundamental changes to the schema so that two versions of Thunderbird 763 * cannot use the same global database. To wit, Thunderbird N+1 needs to 764 * blow away the database of Thunderbird N and reindex from scratch. 765 * Likewise, Thunderbird N will need to blow away Thunderbird N+1's 766 * database because it can't understand it. 
And we can't simply use a
 *   different file because there would be fatal bookkeeping losses.
 *
 * - Bidirectional minor schema changes (rare).
 *   Thunderbird N+1 does something that does not affect Thunderbird N's use
 *   of the database, and a user switching back to Thunderbird N will not be
 *   negatively impacted.  It will also be fine when they go back to N+1 and
 *   N+1 will not be missing any vital data.  The historic example of this is
 *   when we added a missing index that was important for performance.  In
 *   that case, Thunderbird N could have potentially left the schema revision
 *   intact (if there was a safe revision), rather than swapping it on the
 *   downgrade, compelling N+1 to redo the transform on upgrade.
 *
 * - Backwards compatible, upgrade-transition minor schema changes.
 *   Thunderbird N+1 does something that does not require nuking the
 *   database / a full re-index, but does require processing on upgrade from
 *   a version of the database previously used by Thunderbird.  These changes
 *   do not impact N's ability to use the database.  For example, adding a
 *   new indexed attribute that affects a small number of messages could be
 *   handled by issuing a query on upgrade to dirty/index those messages.
 *   However, if the user goes back to N from N+1, when they upgrade to N+1
 *   again, we need to re-index.  In this case N would need to downgrade
 *   the schema revision.
 *
 * - Backwards incompatible, minor schema changes.
 *   Thunderbird N+1 does something that does not require nuking the database
 *   but will break Thunderbird N's ability to use the database.
 *
 * - Regression fixes.  Sometimes we may land something that screws up
 *   databases, or the platform changes in a way that breaks our code and we
 *   had insufficient unit test coverage and so don't detect it until some
 *   databases have gotten messed up.
798 * 799 * Accordingly, every version of Thunderbird has a concept of potential schema 800 * versions with associated semantics to prepare for the minor schema upgrade 801 * cases were inter-op is possible. These ranges and their semantics are: 802 * - accepts and leaves intact. Covers: 803 * - regression fixes that no longer exist with the landing of the upgrade 804 * code as long as users never go back a build in the given channel. 805 * - bidirectional minor schema changes. 806 * - accepts but downgrades version to self. Covers: 807 * - backwards compatible, upgrade-transition minor schema changes. 808 * - nuke range (anything beyond a specific revision needs to be nuked): 809 * - backwards incompatible, minor scheme changes 810 * - fundamental changes 811 * 812 * 813 * SO, YOU WANT TO CHANGE THE SCHEMA? 814 * 815 * Use the ranges below for Thunderbird 11 as a guide, bumping things as little 816 * as possible. If we start to use up the "accepts and leaves intact" range 817 * without majorly changing things up, re-do the numbering acceptance range 818 * to give us additional runway. 819 * 820 * Also, if we keep needing non-nuking upgrades, consider adding an additional 821 * table to the database that can tell older versions of Thunderbird what to 822 * do when confronted with a newer database and where it can set flags to tell 823 * the newer Thunderbird what the older Thunderbird got up to. For example, 824 * it would be much easier if we just tell Thunderbird N what to do when it's 825 * confronted with the database. 826 * 827 * 828 * CURRENT STATE OF THE MIGRATION LOGIC: 829 * 830 * Thunderbird 11: uses 30 (regression fix from 26) 831 * - accepts and leaves intact: 31-34 832 * - accepts and downgrades by 5: 35-39 833 * - nukes: 40+ 834 */ 835 _schemaVersion: 30, 836 // what is the schema in the database right now? 
837 _actualSchemaVersion: 0, 838 _schema: { 839 tables: { 840 // ----- Messages 841 folderLocations: { 842 columns: [ 843 ["id", "INTEGER PRIMARY KEY"], 844 ["folderURI", "TEXT NOT NULL"], 845 ["dirtyStatus", "INTEGER NOT NULL"], 846 ["name", "TEXT NOT NULL"], 847 ["indexingPriority", "INTEGER NOT NULL"], 848 ], 849 850 triggers: { 851 delete: "DELETE from messages WHERE folderID = OLD.id", 852 }, 853 }, 854 855 conversations: { 856 columns: [ 857 ["id", "INTEGER PRIMARY KEY"], 858 ["subject", "TEXT NOT NULL"], 859 ["oldestMessageDate", "INTEGER"], 860 ["newestMessageDate", "INTEGER"], 861 ], 862 863 indices: { 864 subject: ["subject"], 865 oldestMessageDate: ["oldestMessageDate"], 866 newestMessageDate: ["newestMessageDate"], 867 }, 868 869 fulltextColumns: [["subject", "TEXT"]], 870 871 triggers: { 872 delete: "DELETE from messages WHERE conversationID = OLD.id", 873 }, 874 }, 875 876 /** 877 * A message record correspond to an actual message stored in a folder 878 * somewhere, or is a ghost record indicating a message that we know 879 * should exist, but which we have not seen (and which we may never see). 880 * We represent these ghost messages by storing NULL values in the 881 * folderID and messageKey fields; this may need to change to other 882 * sentinel values if this somehow impacts performance. 883 */ 884 messages: { 885 columns: [ 886 ["id", "INTEGER PRIMARY KEY"], 887 ["folderID", "INTEGER"], 888 ["messageKey", "INTEGER"], 889 // conversationID used to have a REFERENCES but I'm losing it for 890 // presumed performance reasons and it doesn't do anything for us. 891 ["conversationID", "INTEGER NOT NULL"], 892 ["date", "INTEGER"], 893 // we used to have the parentID, but because of the very real 894 // possibility of multiple copies of a message with a given 895 // message-id, the parentID concept is unreliable. 
896 ["headerMessageID", "TEXT"], 897 ["deleted", "INTEGER NOT NULL default 0"], 898 ["jsonAttributes", "TEXT"], 899 // Notability attempts to capture the static 'interestingness' of a 900 // message as a result of being starred/flagged, labeled, read 901 // multiple times, authored by someone in your address book or that 902 // you converse with a lot, etc. 903 ["notability", "INTEGER NOT NULL default 0"], 904 ], 905 906 indices: { 907 messageLocation: ["folderID", "messageKey"], 908 headerMessageID: ["headerMessageID"], 909 conversationID: ["conversationID"], 910 date: ["date"], 911 deleted: ["deleted"], 912 }, 913 914 // note: if reordering the columns, you need to change this file's 915 // row-loading logic, GlodaMsgSearcher.jsm's ranking usages and also the 916 // column saturations in nsGlodaRankerFunction 917 fulltextColumns: [ 918 ["body", "TEXT"], 919 ["subject", "TEXT"], 920 ["attachmentNames", "TEXT"], 921 ["author", "TEXT"], 922 ["recipients", "TEXT"], 923 ], 924 925 triggers: { 926 delete: "DELETE FROM messageAttributes WHERE messageID = OLD.id", 927 }, 928 }, 929 930 // ----- Attributes 931 attributeDefinitions: { 932 columns: [ 933 ["id", "INTEGER PRIMARY KEY"], 934 ["attributeType", "INTEGER NOT NULL"], 935 ["extensionName", "TEXT NOT NULL"], 936 ["name", "TEXT NOT NULL"], 937 ["parameter", "BLOB"], 938 ], 939 940 triggers: { 941 delete: "DELETE FROM messageAttributes WHERE attributeID = OLD.id", 942 }, 943 }, 944 945 messageAttributes: { 946 columns: [ 947 // conversationID and messageID used to have REFERENCES back to their 948 // appropriate types. I removed it when removing attributeID for 949 // better reasons and because the code is not capable of violating 950 // this constraint, so the check is just added cost. (And we have 951 // unit tests that sanity check my assertions.) 
952 ["conversationID", "INTEGER NOT NULL"], 953 ["messageID", "INTEGER NOT NULL"], 954 // This used to be REFERENCES attributeDefinitions(id) but then we 955 // introduced sentinel values and it's hard to justify the effort 956 // to compel injection of the record or the overhead to do the 957 // references checking. 958 ["attributeID", "INTEGER NOT NULL"], 959 ["value", "NUMERIC"], 960 ], 961 962 indices: { 963 attribQuery: [ 964 "attributeID", 965 "value", 966 /* covering: */ "conversationID", 967 "messageID", 968 ], 969 // This is required for deletion of a message's attributes to be 970 // performant. We could optimize this index away if we changed our 971 // deletion logic to issue specific attribute deletions based on the 972 // information it already has available in the message's JSON blob. 973 // The rub there is that if we screwed up we could end up leaking 974 // attributes and there is a non-trivial performance overhead to 975 // the many requests it would cause (which can also be reduced in 976 // the future by changing our SQL dispatch code.) 977 messageAttribFastDeletion: ["messageID"], 978 }, 979 }, 980 981 // ----- Contacts / Identities 982 983 /** 984 * Corresponds to a human being and roughly to an address book entry. 985 * Contrast with an identity, which is a specific e-mail address, IRC 986 * nick, etc. Identities belong to contacts, and this relationship is 987 * expressed on the identityAttributes table. 
988 */ 989 contacts: { 990 columns: [ 991 ["id", "INTEGER PRIMARY KEY"], 992 ["directoryUUID", "TEXT"], 993 ["contactUUID", "TEXT"], 994 ["popularity", "INTEGER"], 995 ["frecency", "INTEGER"], 996 ["name", "TEXT"], 997 ["jsonAttributes", "TEXT"], 998 ], 999 indices: { 1000 popularity: ["popularity"], 1001 frecency: ["frecency"], 1002 }, 1003 }, 1004 1005 contactAttributes: { 1006 columns: [ 1007 ["contactID", "INTEGER NOT NULL"], 1008 ["attributeID", "INTEGER NOT NULL"], 1009 ["value", "NUMERIC"], 1010 ], 1011 indices: { 1012 contactAttribQuery: [ 1013 "attributeID", 1014 "value", 1015 /* covering: */ "contactID", 1016 ], 1017 }, 1018 }, 1019 1020 /** 1021 * Identities correspond to specific e-mail addresses, IRC nicks, etc. 1022 */ 1023 identities: { 1024 columns: [ 1025 ["id", "INTEGER PRIMARY KEY"], 1026 ["contactID", "INTEGER NOT NULL"], 1027 ["kind", "TEXT NOT NULL"], // ex: email, irc, etc. 1028 ["value", "TEXT NOT NULL"], // ex: e-mail address, irc nick/handle... 1029 ["description", "NOT NULL"], // what makes this identity different 1030 // from the others? (ex: home, work, etc.) 1031 ["relay", "INTEGER NOT NULL"], // is the identity just a relay 1032 // mechanism? (ex: mailing list, twitter 'bouncer', IRC gateway, etc.) 1033 ], 1034 1035 indices: { 1036 contactQuery: ["contactID"], 1037 valueQuery: ["kind", "value"], 1038 }, 1039 }, 1040 }, 1041 }, 1042 1043 /* ******************* LOGIC ******************* */ 1044 /** 1045 * We only have one connection; this name exists for legacy reasons but helps 1046 * track when we are intentionally doing synchronous things during startup. 1047 * We do nothing synchronous once our setup has completed. 1048 */ 1049 syncConnection: null, 1050 /** 1051 * We only have one connection and we only do asynchronous things after setup; 1052 * this name still exists mainly for legacy reasons. 1053 */ 1054 asyncConnection: null, 1055 1056 /** 1057 * Our "mailnews.database.global.datastore." 
preferences branch for debug 1058 * notification handling. We register as an observer against this. 1059 */ 1060 _prefBranch: null, 1061 1062 /** 1063 * The unique ID assigned to an index when it has been built. This value 1064 * changes once the index has been rebuilt. 1065 */ 1066 _datastoreID: null, 1067 1068 /** 1069 * Initialize logging, create the database if it doesn't exist, "upgrade" it 1070 * if it does and it's not up-to-date, fill our authoritative folder uri/id 1071 * mapping. 1072 */ 1073 _init(aNounIDToDef) { 1074 this._log = console.createInstance({ 1075 prefix: "gloda.datastore", 1076 maxLogLevel: "Warn", 1077 maxLogLevelPref: "gloda.loglevel", 1078 }); 1079 this._log.debug("Beginning datastore initialization."); 1080 1081 this._nounIDToDef = aNounIDToDef; 1082 1083 let branch = Services.prefs.getBranch( 1084 "mailnews.database.global.datastore." 1085 ); 1086 this._prefBranch = branch; 1087 1088 // Not sure the weak reference really makes a difference given that we are a 1089 // GC root. 1090 branch.addObserver("", this); 1091 // claim the pref changed so we can centralize our logic there. 1092 this.observe(null, "nsPref:changed", "explainToPath"); 1093 1094 // Get the path to our global database 1095 var dbFile = Services.dirsvc.get("ProfD", Ci.nsIFile); 1096 dbFile.append("global-messages-db.sqlite"); 1097 1098 var dbConnection; 1099 1100 // Report about the size of the database through telemetry (if there's a 1101 // database, naturally). 
1102 if (dbFile.exists()) { 1103 try { 1104 let h = Services.telemetry.getHistogramById( 1105 "THUNDERBIRD_GLODA_SIZE_MB" 1106 ); 1107 h.add(dbFile.fileSize / 1048576); 1108 } catch (e) { 1109 this._log.warn("Couldn't report telemetry", e); 1110 } 1111 } 1112 1113 // Create the file if it does not exist 1114 if (!dbFile.exists()) { 1115 this._log.debug("Creating database because it doesn't exist."); 1116 dbConnection = this._createDB(dbFile); 1117 } else { 1118 // It does exist, but we (someday) might need to upgrade the schema 1119 // (Exceptions may be thrown if the database is corrupt) 1120 try { 1121 dbConnection = Services.storage.openUnsharedDatabase(dbFile); 1122 let cacheSize = this._determineCachePages(dbConnection); 1123 // see _createDB... 1124 dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cacheSize); 1125 dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL"); 1126 1127 // Register custom tokenizer to index all language text 1128 var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService( 1129 Ci.nsIFts3Tokenizer 1130 ); 1131 tokenizer.registerTokenizer(dbConnection); 1132 1133 // -- database schema changes 1134 let dbSchemaVersion = (this._actualSchemaVersion = 1135 dbConnection.schemaVersion); 1136 // - database from the future! 
1137 if (dbSchemaVersion > this._schemaVersion) { 1138 if ( 1139 dbSchemaVersion >= DB_SCHEMA_ACCEPT_LEAVE_LOW && 1140 dbSchemaVersion <= DB_SCHEMA_ACCEPT_LEAVE_HIGH 1141 ) { 1142 this._log.debug( 1143 "db from the future in acceptable range; leaving " + 1144 "version at: " + 1145 dbSchemaVersion 1146 ); 1147 } else if ( 1148 dbSchemaVersion >= DB_SCHEMA_ACCEPT_DOWNGRADE_LOW && 1149 dbSchemaVersion <= DB_SCHEMA_ACCEPT_DOWNGRADE_HIGH 1150 ) { 1151 let newVersion = dbSchemaVersion - DB_SCHEMA_DOWNGRADE_DELTA; 1152 this._log.debug( 1153 "db from the future in downgrade range; setting " + 1154 "version to " + 1155 newVersion + 1156 " down from " + 1157 dbSchemaVersion 1158 ); 1159 dbConnection.schemaVersion = this._actualSchemaVersion = newVersion; 1160 } else { 1161 // too far from the future, nuke it. 1162 dbConnection = this._nukeMigration(dbFile, dbConnection); 1163 } 1164 } else if (dbSchemaVersion < this._schemaVersion) { 1165 // - database from the past! migrate it, possibly. 1166 this._log.debug( 1167 "Need to migrate database. (DB version: " + 1168 this._actualSchemaVersion + 1169 " desired version: " + 1170 this._schemaVersion 1171 ); 1172 dbConnection = this._migrate( 1173 dbFile, 1174 dbConnection, 1175 this._actualSchemaVersion, 1176 this._schemaVersion 1177 ); 1178 this._log.debug("Migration call completed."); 1179 } 1180 // else: this database is juuust right. 1181 1182 // If we never had a datastore ID, make sure to create one now. 
1183 if (!this._prefBranch.prefHasUserValue("id")) { 1184 this._datastoreID = this._generateDatastoreID(); 1185 this._prefBranch.setCharPref("id", this._datastoreID); 1186 } else { 1187 this._datastoreID = this._prefBranch.getCharPref("id"); 1188 } 1189 } catch (ex) { 1190 // Handle corrupt databases, other oddities 1191 if (ex.result == Cr.NS_ERROR_FILE_CORRUPTED) { 1192 this._log.warn("Database was corrupt, removing the old one."); 1193 dbFile.remove(false); 1194 this._log.warn("Removed old database, creating a new one."); 1195 dbConnection = this._createDB(dbFile); 1196 } else { 1197 this._log.error( 1198 "Unexpected error when trying to open the database:", 1199 ex 1200 ); 1201 throw ex; 1202 } 1203 } 1204 } 1205 1206 this.syncConnection = dbConnection; 1207 this.asyncConnection = dbConnection; 1208 1209 this._log.debug("Initializing folder mappings."); 1210 this._getAllFolderMappings(); 1211 // we need to figure out the next id's for all of the tables where we 1212 // manage that. 1213 this._log.debug("Populating managed id counters."); 1214 this._populateAttributeDefManagedId(); 1215 this._populateConversationManagedId(); 1216 this._populateMessageManagedId(); 1217 this._populateContactManagedId(); 1218 this._populateIdentityManagedId(); 1219 1220 // create the timer we use to periodically drop our references to folders 1221 // we no longer need XPCOM references to (or more significantly, their 1222 // message databases.) 
1223 this._folderCleanupTimer = Cc["@mozilla.org/timer;1"].createInstance( 1224 Ci.nsITimer 1225 ); 1226 1227 this._log.debug("Completed datastore initialization."); 1228 }, 1229 1230 observe(aSubject, aTopic, aData) { 1231 if (aTopic != "nsPref:changed") { 1232 return; 1233 } 1234 1235 if (aData == "explainToPath") { 1236 let explainToPath = null; 1237 try { 1238 explainToPath = this._prefBranch.getCharPref("explainToPath"); 1239 if (explainToPath.trim() == "") { 1240 explainToPath = null; 1241 } 1242 } catch (ex) { 1243 // don't care if the pref is not there. 1244 } 1245 1246 // It is conceivable that the name is changing and this isn't a boolean 1247 // toggle, so always clean out the explain processor. 1248 if (this._explainProcessor) { 1249 this._explainProcessor.shutdown(); 1250 this._explainProcessor = null; 1251 } 1252 1253 if (explainToPath) { 1254 this._createAsyncStatement = this._createExplainedAsyncStatement; 1255 this._explainProcessor = new ExplainedStatementProcessor(explainToPath); 1256 } else { 1257 this._createAsyncStatement = this._realCreateAsyncStatement; 1258 } 1259 } 1260 }, 1261 1262 datastoreIsShutdown: false, 1263 1264 /** 1265 * Perform datastore shutdown. 1266 */ 1267 shutdown() { 1268 // Clear out any pending transaction by committing it. 1269 // The indexer has been shutdown by this point; it no longer has any active 1270 // indexing logic and it no longer has active event listeners capable of 1271 // generating new activity. 1272 // Semantic consistency of the database is guaranteed by the indexer's 1273 // strategy of only yielding control at coherent times. Although it takes 1274 // multiple calls and multiple SQL operations to update the state of our 1275 // database representations, the generator does not yield until it has 1276 // issued all the database statements required for said update. 
As such, 1277 // this commit will leave us in a good way (and the commit will happen 1278 // because closing the connection will drain the async execution queue.) 1279 while (this._transactionDepth) { 1280 this._log.info("Closing pending transaction out for shutdown."); 1281 // just schedule this function to be run again once the transaction has 1282 // been closed out. 1283 this._commitTransaction(); 1284 } 1285 1286 this.datastoreIsShutdown = true; 1287 1288 // shutdown our folder cleanup timer, if active and null it out. 1289 if (this._folderCleanupActive) { 1290 this._folderCleanupTimer.cancel(); 1291 } 1292 this._folderCleanupTimer = null; 1293 1294 this._log.info("Closing db connection"); 1295 1296 // we do not expect exceptions, but it's a good idea to avoid having our 1297 // shutdown process explode. 1298 try { 1299 this._cleanupAsyncStatements(); 1300 this._cleanupSyncStatements(); 1301 } catch (ex) { 1302 this._log.debug("Unexpected exception during statement cleanup: " + ex); 1303 } 1304 1305 // it's conceivable we might get a spurious exception here, but we really 1306 // shouldn't get one. again, we want to ensure shutdown runs to completion 1307 // and doesn't break our caller. 1308 try { 1309 // This currently causes all pending asynchronous operations to be run to 1310 // completion. this simplifies things from a correctness perspective, 1311 // and, honestly, is a lot easier than us tracking all of the async 1312 // event tasks so that we can explicitly cancel them. 1313 // This is a reasonable thing to do because we don't actually ever have 1314 // a huge number of statements outstanding. The indexing process needs 1315 // to issue async requests periodically, so the most we have in-flight 1316 // from a write perspective is strictly less than the work required to 1317 // update the database state for a single message. 
1318 // However, the potential for multiple pending expensive queries does 1319 // exist, and it may be advisable to attempt to track and cancel those. 1320 // For simplicity we don't currently do this, and I expect this should 1321 // not pose a major problem, but those are famous last words. 1322 // Note: asyncClose does not spin a nested event loop, but the thread 1323 // manager shutdown code will spin the async thread's event loop, so it 1324 // nets out to be the same. 1325 this.asyncConnection.asyncClose(); 1326 } catch (ex) { 1327 this._log.debug( 1328 "Potentially expected exception during connection closure: " + ex 1329 ); 1330 } 1331 1332 this.asyncConnection = null; 1333 this.syncConnection = null; 1334 }, 1335 1336 /** 1337 * Generates and returns a UUID. 1338 * 1339 * @return a UUID as a string, ex: "c4dd0159-9287-480f-a648-a4613e147fdb" 1340 */ 1341 _generateDatastoreID() { 1342 let uuidGen = Cc["@mozilla.org/uuid-generator;1"].getService( 1343 Ci.nsIUUIDGenerator 1344 ); 1345 let uuid = uuidGen.generateUUID().toString(); 1346 // We snip off the { and } from each end of the UUID. 1347 return uuid.substring(1, uuid.length - 2); 1348 }, 1349 1350 _determineCachePages(aDBConn) { 1351 try { 1352 // For the details of the computations, one should read 1353 // nsNavHistory::InitDB. We're slightly diverging from them in the sense 1354 // that we won't allow gloda to use insane amounts of memory cache, and 1355 // we start with 1% instead of 6% like them. 
1356 let pageStmt = aDBConn.createStatement("PRAGMA page_size"); 1357 pageStmt.executeStep(); 1358 let pageSize = pageStmt.row.page_size; 1359 pageStmt.finalize(); 1360 let cachePermillage = this._prefBranch.getIntPref( 1361 "cache_to_memory_permillage" 1362 ); 1363 cachePermillage = Math.min(cachePermillage, 50); 1364 cachePermillage = Math.max(cachePermillage, 0); 1365 let physMem = Services.sysinfo.getPropertyAsInt64("memsize"); 1366 if (physMem == 0) { 1367 physMem = MEMSIZE_FALLBACK_BYTES; 1368 } 1369 let cacheSize = Math.round((physMem * cachePermillage) / 1000); 1370 cacheSize = Math.max(cacheSize, MIN_CACHE_SIZE); 1371 cacheSize = Math.min(cacheSize, MAX_CACHE_SIZE); 1372 let cachePages = Math.round(cacheSize / pageSize); 1373 return cachePages; 1374 } catch (ex) { 1375 this._log.warn("Error determining cache size: " + ex); 1376 // A little bit lower than on my personal machine, will result in ~40M. 1377 return 1000; 1378 } 1379 }, 1380 1381 /** 1382 * Create our database; basically a wrapper around _createSchema. 1383 */ 1384 _createDB(aDBFile) { 1385 var dbConnection = Services.storage.openUnsharedDatabase(aDBFile); 1386 // We now follow the Firefox strategy for places, which mainly consists in 1387 // picking a default 32k page size, and then figuring out the amount of 1388 // cache accordingly. The default 32k come from mozilla/toolkit/storage, 1389 // but let's get it directly from sqlite in case they change it. 1390 let cachePages = this._determineCachePages(dbConnection); 1391 // This is a maximum number of pages to be used. If the database does not 1392 // get this large, then the memory does not get used. 1393 // Do not forget to update the code in _init if you change this value. 1394 dbConnection.executeSimpleSQL("PRAGMA cache_size = " + cachePages); 1395 // The mozStorage default is NORMAL which shaves off some fsyncs in the 1396 // interest of performance. 
Since everything we do after bootstrap is 1397 // async, we do not care about the performance, but we really want the 1398 // correctness. Bug reports and support avenues indicate a non-zero number 1399 // of corrupt databases. Note that this may not fix everything; OS X 1400 // also supports an F_FULLSYNC flag enabled by PRAGMA fullfsync that we are 1401 // not enabling that is much more comprehensive. We can think about 1402 // turning that on after we've seen how this reduces our corruption count. 1403 dbConnection.executeSimpleSQL("PRAGMA synchronous = FULL"); 1404 // Register custom tokenizer to index all language text 1405 var tokenizer = Cc["@mozilla.org/messenger/fts3tokenizer;1"].getService( 1406 Ci.nsIFts3Tokenizer 1407 ); 1408 tokenizer.registerTokenizer(dbConnection); 1409 1410 // We're creating a new database, so let's generate a new ID for this 1411 // version of the datastore. This way, indexers can know when the index 1412 // has been rebuilt in the event that they need to rebuild dependent data. 
1413 this._datastoreID = this._generateDatastoreID(); 1414 this._prefBranch.setCharPref("id", this._datastoreID); 1415 1416 dbConnection.beginTransaction(); 1417 try { 1418 this._createSchema(dbConnection); 1419 dbConnection.commitTransaction(); 1420 } catch (ex) { 1421 dbConnection.rollbackTransaction(); 1422 throw ex; 1423 } 1424 1425 return dbConnection; 1426 }, 1427 1428 _createTableSchema(aDBConnection, aTableName, aTableDef) { 1429 // - Create the table 1430 this._log.info("Creating table: " + aTableName); 1431 let columnDefs = []; 1432 for (let [column, type] of aTableDef.columns) { 1433 columnDefs.push(column + " " + type); 1434 } 1435 aDBConnection.createTable(aTableName, columnDefs.join(", ")); 1436 1437 // - Create the fulltext table if applicable 1438 if (aTableDef.fulltextColumns) { 1439 let columnDefs = []; 1440 for (let [column, type] of aTableDef.fulltextColumns) { 1441 columnDefs.push(column + " " + type); 1442 } 1443 let createFulltextSQL = 1444 "CREATE VIRTUAL TABLE " + 1445 aTableName + 1446 "Text" + 1447 " USING fts3(tokenize mozporter, " + 1448 columnDefs.join(", ") + 1449 ")"; 1450 this._log.info("Creating fulltext table: " + createFulltextSQL); 1451 aDBConnection.executeSimpleSQL(createFulltextSQL); 1452 } 1453 1454 // - Create its indices 1455 if (aTableDef.indices) { 1456 for (let indexName in aTableDef.indices) { 1457 let indexColumns = aTableDef.indices[indexName]; 1458 aDBConnection.executeSimpleSQL( 1459 "CREATE INDEX " + 1460 indexName + 1461 " ON " + 1462 aTableName + 1463 "(" + 1464 indexColumns.join(", ") + 1465 ")" 1466 ); 1467 } 1468 } 1469 1470 // - Create the attributes table if applicable 1471 if (aTableDef.genericAttributes) { 1472 aTableDef.genericAttributes = { 1473 columns: [ 1474 ["nounID", "INTEGER NOT NULL"], 1475 ["attributeID", "INTEGER NOT NULL"], 1476 ["value", "NUMERIC"], 1477 ], 1478 indices: {}, 1479 }; 1480 aTableDef.genericAttributes.indices[aTableName + "AttribQuery"] = [ 1481 "attributeID", 1482 "value", 1483 
/* covering: */ "nounID", 1484 ]; 1485 // let's use this very function! (since we created genericAttributes, 1486 // explodey recursion is avoided.) 1487 this._createTableSchema( 1488 aDBConnection, 1489 aTableName + "Attributes", 1490 aTableDef.genericAttributes 1491 ); 1492 } 1493 }, 1494 1495 /** 1496 * Create our database schema assuming a newly created database. This 1497 * comes down to creating normal tables, their full-text variants (if 1498 * applicable), and their indices. 1499 */ 1500 _createSchema(aDBConnection) { 1501 // -- For each table... 1502 for (let tableName in this._schema.tables) { 1503 let tableDef = this._schema.tables[tableName]; 1504 this._createTableSchema(aDBConnection, tableName, tableDef); 1505 } 1506 1507 aDBConnection.schemaVersion = this._actualSchemaVersion = this._schemaVersion; 1508 }, 1509 1510 /** 1511 * Create a table for a noun, replete with data binding. 1512 */ 1513 createNounTable(aNounDef) { 1514 // give it a _jsonText attribute if appropriate... 1515 if (aNounDef.allowsArbitraryAttrs) { 1516 aNounDef.schema.columns.push(["jsonAttributes", "STRING", "_jsonText"]); 1517 } 1518 // check if the table exists 1519 if (!this.asyncConnection.tableExists(aNounDef.tableName)) { 1520 // it doesn't! 
create it (and its potentially many variants) 1521 try { 1522 this._createTableSchema( 1523 this.asyncConnection, 1524 aNounDef.tableName, 1525 aNounDef.schema 1526 ); 1527 } catch (ex) { 1528 this._log.error( 1529 "Problem creating table " + 1530 aNounDef.tableName + 1531 " " + 1532 "because: " + 1533 ex + 1534 " at " + 1535 ex.fileName + 1536 ":" + 1537 ex.lineNumber 1538 ); 1539 return; 1540 } 1541 } 1542 1543 aNounDef._dataBinder = new GlodaDatabind(aNounDef, this); 1544 aNounDef.datastore = aNounDef._dataBinder; 1545 aNounDef.objFromRow = aNounDef._dataBinder.objFromRow; 1546 aNounDef.objInsert = aNounDef._dataBinder.objInsert; 1547 aNounDef.objUpdate = aNounDef._dataBinder.objUpdate; 1548 aNounDef.dbAttribAdjuster = aNounDef._dataBinder.adjustAttributes; 1549 1550 if (aNounDef.schema.genericAttributes) { 1551 aNounDef.attrTableName = aNounDef.tableName + "Attributes"; 1552 aNounDef.attrIDColumnName = "nounID"; 1553 } 1554 }, 1555 1556 _nukeMigration(aDBFile, aDBConnection) { 1557 aDBConnection.close(); 1558 aDBFile.remove(false); 1559 this._log.warn( 1560 "Global database has been purged due to schema change. " + 1561 "old version was " + 1562 this._actualSchemaVersion + 1563 ", new version is: " + 1564 this._schemaVersion 1565 ); 1566 return this._createDB(aDBFile); 1567 }, 1568 1569 /** 1570 * Migrate the database _to the latest version_ from an older version. We 1571 * only keep enough logic around to get us to the recent version. This code 1572 * is not a time machine! If we need to blow away the database to get to the 1573 * most recent version, then that's the sum total of the migration! 1574 */ 1575 _migrate(aDBFile, aDBConnection, aCurVersion, aNewVersion) { 1576 // version 12: 1577 // - notability column added 1578 // version 13: 1579 // - we are adding a new fulltext index column. blow away! 1580 // - note that I screwed up and failed to mark the schema change; apparently 1581 // no database will claim to be version 13... 
1582 // version 14ish, still labeled 13?: 1583 // - new attributes: forwarded, repliedTo, bcc, recipients 1584 // - altered fromMeTo and fromMeCc to fromMe 1585 // - altered toMe and ccMe to just be toMe 1586 // - exposes bcc to cc-related attributes 1587 // - MIME type DB schema overhaul 1588 // version 15ish, still labeled 13: 1589 // - change tokenizer to mozporter to support CJK 1590 // (We are slip-streaming this so that only people who want to test CJK 1591 // have to test it. We will properly bump the schema revision when the 1592 // gloda correctness patch lands.) 1593 // version 16ish, labeled 14 and now 16 1594 // - gloda message id's start from 32 now 1595 // - all kinds of correctness changes (blow away) 1596 // version 17 1597 // - more correctness fixes. (blow away) 1598 // version 18 1599 // - significant empty set support (blow away) 1600 // version 19 1601 // - there was a typo that was resulting in deleted getting set to the 1602 // numeric value of the javascript undefined value. (migrate-able) 1603 // version 20 1604 // - tokenizer changes to provide for case/accent-folding. (blow away) 1605 // version 21 1606 // - add the messagesAttribFastDeletion index we thought was already covered 1607 // by an index we removed a while ago (migrate-able) 1608 // version 26 1609 // - bump page size and also cache size (blow away) 1610 // version 30 1611 // - recover from bug 732372 that affected TB 11 beta / TB 12 alpha / TB 13 1612 // trunk. The fix is bug 734507. The revision bump happens 1613 // asynchronously. (migrate-able) 1614 1615 // nuke if prior to 26 1616 if (aCurVersion < 26) { 1617 return this._nukeMigration(aDBFile, aDBConnection); 1618 } 1619 1620 // They must be desiring our "a.contact is undefined" fix! 1621 // This fix runs asynchronously as the first indexing job the indexer ever 1622 // performs. It is scheduled by the enabling of the message indexer and 1623 // it is the one that updates the schema version when done. 
1624 1625 // return the same DB connection since we didn't create a new one or do 1626 // anything. 1627 return aDBConnection; 1628 }, 1629 1630 /** 1631 * Asynchronously update the schema version; only for use by in-tree callers 1632 * who asynchronously perform migration work triggered by their initial 1633 * indexing sweep and who have properly updated the schema version in all 1634 * the appropriate locations in this file. 1635 * 1636 * This is done without doing anything about the current transaction state, 1637 * which is desired. 1638 */ 1639 _updateSchemaVersion(newSchemaVersion) { 1640 this._actualSchemaVersion = newSchemaVersion; 1641 let stmt = this._createAsyncStatement( 1642 // we need to concat; pragmas don't like "?1" binds 1643 "PRAGMA user_version = " + newSchemaVersion, 1644 true 1645 ); 1646 stmt.executeAsync(this.trackAsync()); 1647 stmt.finalize(); 1648 }, 1649 1650 _outstandingAsyncStatements: [], 1651 1652 /** 1653 * Unless debugging, this is just _realCreateAsyncStatement, but in some 1654 * debugging modes this is instead the helpful wrapper 1655 * _createExplainedAsyncStatement. 1656 */ 1657 _createAsyncStatement: null, 1658 1659 _realCreateAsyncStatement(aSQLString, aWillFinalize) { 1660 let statement = null; 1661 try { 1662 statement = this.asyncConnection.createAsyncStatement(aSQLString); 1663 } catch (ex) { 1664 throw new Error( 1665 "error creating async statement " + 1666 aSQLString + 1667 " - " + 1668 this.asyncConnection.lastError + 1669 ": " + 1670 this.asyncConnection.lastErrorString + 1671 " - " + 1672 ex 1673 ); 1674 } 1675 1676 if (!aWillFinalize) { 1677 this._outstandingAsyncStatements.push(statement); 1678 } 1679 1680 return statement; 1681 }, 1682 1683 /** 1684 * The ExplainedStatementProcessor instance used by 1685 * _createExplainedAsyncStatement. This will be null if 1686 * _createExplainedAsyncStatement is not being used as _createAsyncStatement. 
1687 */ 1688 _explainProcessor: null, 1689 1690 /** 1691 * Wrapped version of _createAsyncStatement that EXPLAINs the statement. When 1692 * used this decorates _createAsyncStatement, in which case we are found at 1693 * that name and the original is at _orig_createAsyncStatement. This is 1694 * controlled by the explainToPath preference (see |_init|). 1695 */ 1696 _createExplainedAsyncStatement(aSQLString, aWillFinalize) { 1697 let realStatement = this._realCreateAsyncStatement( 1698 aSQLString, 1699 aWillFinalize 1700 ); 1701 // don't wrap transaction control statements. 1702 if ( 1703 aSQLString == "COMMIT" || 1704 aSQLString == "BEGIN TRANSACTION" || 1705 aSQLString == "ROLLBACK" 1706 ) { 1707 return realStatement; 1708 } 1709 1710 let explainSQL = "EXPLAIN " + aSQLString; 1711 let explainStatement = this._realCreateAsyncStatement(explainSQL); 1712 1713 return new ExplainedStatementWrapper( 1714 realStatement, 1715 explainStatement, 1716 aSQLString, 1717 this._explainProcessor 1718 ); 1719 }, 1720 1721 _cleanupAsyncStatements() { 1722 this._outstandingAsyncStatements.forEach(stmt => stmt.finalize()); 1723 }, 1724 1725 _outstandingSyncStatements: [], 1726 1727 _createSyncStatement(aSQLString, aWillFinalize) { 1728 let statement = null; 1729 try { 1730 statement = this.syncConnection.createStatement(aSQLString); 1731 } catch (ex) { 1732 throw new Error( 1733 "error creating sync statement " + 1734 aSQLString + 1735 " - " + 1736 this.syncConnection.lastError + 1737 ": " + 1738 this.syncConnection.lastErrorString + 1739 " - " + 1740 ex 1741 ); 1742 } 1743 1744 if (!aWillFinalize) { 1745 this._outstandingSyncStatements.push(statement); 1746 } 1747 1748 return statement; 1749 }, 1750 1751 _cleanupSyncStatements() { 1752 this._outstandingSyncStatements.forEach(stmt => stmt.finalize()); 1753 }, 1754 1755 /** 1756 * Perform a synchronous executeStep on the statement, handling any 1757 * SQLITE_BUSY fallout that could conceivably happen from a collision on our 1758 * 
read with the async writes. 1759 * Basically we keep trying until we succeed or run out of tries. 1760 * We believe this to be a reasonable course of action because we don't 1761 * expect this to happen much. 1762 */ 1763 _syncStep(aStatement) { 1764 let tries = 0; 1765 while (tries < 32000) { 1766 try { 1767 return aStatement.executeStep(); 1768 } catch (e) { 1769 // SQLITE_BUSY becomes NS_ERROR_FAILURE 1770 if (e.result == Cr.NS_ERROR_FAILURE) { 1771 tries++; 1772 // we really need to delay here, somehow. unfortunately, we can't 1773 // allow event processing to happen, and most of the things we could 1774 // do to delay ourselves result in event processing happening. (Use 1775 // of a timer, a synchronous dispatch, etc.) 1776 // in theory, nsIThreadEventFilter could allow us to stop other events 1777 // that aren't our timer from happening, but it seems slightly 1778 // dangerous and 'notxpcom' suggests it ain't happening anyways... 1779 // so, let's just be dumb and hope that the underlying file I/O going 1780 // on makes us more likely to yield to the other thread so it can 1781 // finish what it is doing... 1782 } else { 1783 throw e; 1784 } 1785 } 1786 } 1787 this._log.error("Synchronous step gave up after " + tries + " tries."); 1788 return false; 1789 }, 1790 1791 _bindVariant(aStatement, aIndex, aVariant) { 1792 aStatement.bindByIndex(aIndex, aVariant); 1793 }, 1794 1795 /** 1796 * Helper that uses the appropriate getter given the data type; should be 1797 * mooted once we move to 1.9.2 and can use built-in variant support. 1798 */ 1799 _getVariant(aRow, aIndex) { 1800 let typeOfIndex = aRow.getTypeOfIndex(aIndex); 1801 if (typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 1802 // XPConnect would just end up going through an intermediary double stage 1803 // for the int64 case anyways... 
      return null;
    }
    if (
      typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_INTEGER ||
      typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_DOUBLE
    ) {
      return aRow.getDouble(aIndex);
    }
    // typeOfIndex == Ci.mozIStorageValueArray.VALUE_TYPE_TEXT
    return aRow.getString(aIndex);
  },

  /** Simple nested transaction support as a performance optimization. */
  _transactionDepth: 0,
  // Set false once any nested level rolls back; checked at outermost commit.
  _transactionGood: false,

  /**
   * Self-memoizing BEGIN TRANSACTION statement.
   */
  get _beginTransactionStatement() {
    let statement = this._createAsyncStatement("BEGIN TRANSACTION");
    this.__defineGetter__("_beginTransactionStatement", () => statement);
    return this._beginTransactionStatement;
  },

  /**
   * Self-memoizing COMMIT statement.
   */
  get _commitTransactionStatement() {
    let statement = this._createAsyncStatement("COMMIT");
    this.__defineGetter__("_commitTransactionStatement", () => statement);
    return this._commitTransactionStatement;
  },

  /**
   * Self-memoizing ROLLBACK statement.
   */
  get _rollbackTransactionStatement() {
    let statement = this._createAsyncStatement("ROLLBACK");
    this.__defineGetter__("_rollbackTransactionStatement", () => statement);
    return this._rollbackTransactionStatement;
  },

  // Callbacks to invoke when the current outermost transaction's commit
  // completes; re-initialized each time an outermost transaction begins.
  _pendingPostCommitCallbacks: null,
  /**
   * Register a callback to be invoked when the current transaction's commit
   * completes.
   */
  runPostCommit(aCallback) {
    this._pendingPostCommitCallbacks.push(aCallback);
  },

  /**
   * Begin a potentially nested transaction; only the outermost transaction gets
   * to be an actual transaction, and the failure of any nested transaction
   * results in a rollback of the entire outer transaction. If you really
   * need an atomic transaction
   */
  _beginTransaction() {
    if (this._transactionDepth == 0) {
      this._pendingPostCommitCallbacks = [];
      this._beginTransactionStatement.executeAsync(this.trackAsync());
      this._transactionGood = true;
    }
    this._transactionDepth++;
  },
  /**
   * Commit a potentially nested transaction; if we are the outer-most
   * transaction and no sub-transaction issues a rollback
   * (via _rollbackTransaction) then we commit, otherwise we rollback.
   */
  _commitTransaction() {
    this._transactionDepth--;
    if (this._transactionDepth == 0) {
      try {
        if (this._transactionGood) {
          // PostCommitHandler runs the registered callbacks once the COMMIT
          // actually completes.
          this._commitTransactionStatement.executeAsync(
            new PostCommitHandler(this._pendingPostCommitCallbacks)
          );
        } else {
          this._rollbackTransactionStatement.executeAsync(this.trackAsync());
        }
      } catch (ex) {
        this._log.error("Commit problem:", ex);
      }
      this._pendingPostCommitCallbacks = [];
    }
  },
  /**
   * Abort the commit of the potentially nested transaction. If we are not the
   * outermost transaction, we set a flag that tells the outermost transaction
   * that it must roll back.
   */
  _rollbackTransaction() {
    this._transactionDepth--;
    this._transactionGood = false;
    if (this._transactionDepth == 0) {
      try {
        this._rollbackTransactionStatement.executeAsync(this.trackAsync());
      } catch (ex) {
        this._log.error("Rollback problem:", ex);
      }
    }
  },

  // Count of async statements whose completion we are still waiting on.
  _pendingAsyncStatements: 0,
  /**
   * The function to call, if any, when we hit 0 pending async statements.
1912 */ 1913 _pendingAsyncCompletedListener: null, 1914 _asyncCompleted() { 1915 if (--this._pendingAsyncStatements == 0) { 1916 if (this._pendingAsyncCompletedListener !== null) { 1917 this._pendingAsyncCompletedListener(); 1918 this._pendingAsyncCompletedListener = null; 1919 } 1920 } 1921 }, 1922 _asyncTrackerListener: { 1923 handleResult() {}, 1924 handleError(aError) { 1925 GlodaDatastore._log.error( 1926 "got error in _asyncTrackerListener.handleError(): " + 1927 aError.result + 1928 ": " + 1929 aError.message 1930 ); 1931 }, 1932 handleCompletion() { 1933 try { 1934 // the helper method exists because the other classes need to call it too 1935 GlodaDatastore._asyncCompleted(); 1936 } catch (e) { 1937 this._log.error("Exception in handleCompletion:", e); 1938 } 1939 }, 1940 }, 1941 /** 1942 * Increments _pendingAsyncStatements and returns a listener that will 1943 * decrement the value when the statement completes. 1944 */ 1945 trackAsync() { 1946 this._pendingAsyncStatements++; 1947 return this._asyncTrackerListener; 1948 }, 1949 1950 /* ********** Attribute Definitions ********** */ 1951 /** Maps (attribute def) compound names to the GlodaAttributeDBDef objects. */ 1952 _attributeDBDefs: {}, 1953 /** Map attribute ID to the definition and parameter value that produce it. */ 1954 _attributeIDToDBDefAndParam: {}, 1955 1956 /** 1957 * This attribute id indicates that we are encoding that a non-singular 1958 * attribute has an empty set. The value payload that goes with this should 1959 * the attribute id of the attribute we are talking about. 1960 */ 1961 kEmptySetAttrId: 1, 1962 1963 /** 1964 * We maintain the attributeDefinitions next id counter mainly because we can. 1965 * Since we mediate the access, there's no real risk to doing so, and it 1966 * allows us to keep the writes on the async connection without having to 1967 * wait for a completion notification. 1968 * 1969 * Start from 32 so we can have a number of sentinel values. 
1970 */ 1971 _nextAttributeId: 32, 1972 1973 _populateAttributeDefManagedId() { 1974 let stmt = this._createSyncStatement( 1975 "SELECT MAX(id) FROM attributeDefinitions", 1976 true 1977 ); 1978 if (stmt.executeStep()) { 1979 // no chance of this SQLITE_BUSY on this call 1980 // 0 gets returned even if there are no messages... 1981 let highestSeen = stmt.getInt64(0); 1982 if (highestSeen != 0) { 1983 this._nextAttributeId = highestSeen + 1; 1984 } 1985 } 1986 stmt.finalize(); 1987 }, 1988 1989 get _insertAttributeDefStatement() { 1990 let statement = this._createAsyncStatement( 1991 "INSERT INTO attributeDefinitions (id, attributeType, extensionName, \ 1992 name, parameter) \ 1993 VALUES (?1, ?2, ?3, ?4, ?5)" 1994 ); 1995 this.__defineGetter__("_insertAttributeDefStatement", () => statement); 1996 return this._insertAttributeDefStatement; 1997 }, 1998 1999 /** 2000 * Create an attribute definition and return the row ID. Special/atypical 2001 * in that it doesn't directly return a GlodaAttributeDBDef; we leave that up 2002 * to the caller since they know much more than actually needs to go in the 2003 * database. 2004 * 2005 * @return The attribute id allocated to this attribute. 2006 */ 2007 _createAttributeDef(aAttrType, aExtensionName, aAttrName, aParameter) { 2008 let attributeId = this._nextAttributeId++; 2009 2010 let iads = this._insertAttributeDefStatement; 2011 iads.bindByIndex(0, attributeId); 2012 iads.bindByIndex(1, aAttrType); 2013 iads.bindByIndex(2, aExtensionName); 2014 iads.bindByIndex(3, aAttrName); 2015 this._bindVariant(iads, 4, aParameter); 2016 2017 iads.executeAsync(this.trackAsync()); 2018 2019 return attributeId; 2020 }, 2021 2022 /** 2023 * Sync-ly look-up all the attribute definitions, populating our authoritative 2024 * _attributeDBDefss and _attributeIDToDBDefAndParam maps. (In other words, 2025 * once this method is called, those maps should always be in sync with the 2026 * underlying database.) 
 */
  getAllAttributes() {
    let stmt = this._createSyncStatement(
      "SELECT id, attributeType, extensionName, name, parameter \
       FROM attributeDefinitions",
      true
    );

    // map compound name to the attribute
    let attribs = {};
    // map the attribute id to [attribute, parameter] where parameter is null
    // in cases where parameter is unused.
    let idToAttribAndParam = {};

    this._log.info("loading all attribute defs");

    while (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      let rowId = stmt.getInt64(0);
      let rowAttributeType = stmt.getInt64(1);
      let rowExtensionName = stmt.getString(2);
      let rowName = stmt.getString(3);
      let rowParameter = this._getVariant(stmt, 4);

      let compoundName = rowExtensionName + ":" + rowName;

      let attrib;
      if (compoundName in attribs) {
        attrib = attribs[compoundName];
      } else {
        attrib = new GlodaAttributeDBDef(
          this,
          /* aID */ null,
          compoundName,
          rowAttributeType,
          rowExtensionName,
          rowName
        );
        attribs[compoundName] = attrib;
      }
      // if the parameter is null, the id goes on the attribute def, otherwise
      // it is a parameter binding and goes in the binding map.
      if (rowParameter == null) {
        this._log.debug(compoundName + " primary: " + rowId);
        attrib._id = rowId;
        idToAttribAndParam[rowId] = [attrib, null];
      } else {
        this._log.debug(
          compoundName + " binding: " + rowParameter + " = " + rowId
        );
        attrib._parameterBindings[rowParameter] = rowId;
        idToAttribAndParam[rowId] = [attrib, rowParameter];
      }
    }
    stmt.finalize();

    this._log.info("done loading all attribute defs");

    this._attributeDBDefs = attribs;
    this._attributeIDToDBDefAndParam = idToAttribAndParam;
  },

  /**
   * Helper method for GlodaAttributeDBDef to tell us when their bindParameter
   * method is called and they have created a new binding (using
   * GlodaDatastore._createAttributeDef). In theory, that method could take
   * an additional argument and obviate the need for this method.
   */
  reportBinding(aID, aAttrDef, aParamValue) {
    this._attributeIDToDBDefAndParam[aID] = [aAttrDef, aParamValue];
  },

  /* ********** Folders ********** */
  /** next folder (row) id to issue, populated by _getAllFolderMappings. */
  _nextFolderId: 1,

  /** Self-memoizing INSERT statement for folderLocations rows. */
  get _insertFolderLocationStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO folderLocations (id, folderURI, dirtyStatus, name, \
       indexingPriority) VALUES \
       (?1, ?2, ?3, ?4, ?5)"
    );
    this.__defineGetter__("_insertFolderLocationStatement", () => statement);
    return this._insertFolderLocationStatement;
  },

  /**
   * Authoritative map from folder URI to GlodaFolder instance. (Authoritative
   * in the sense that this map exactly represents the state of the underlying
   * database. If it does not, it's a bug in updating the database.)
   */
  _folderByURI: {},
  /** Authoritative map from folder ID to GlodaFolder instance. */
  _folderByID: {},

  /** Initialize our _folderByURI/_folderByID mappings, called by _init().
 */
  _getAllFolderMappings() {
    let stmt = this._createSyncStatement(
      "SELECT id, folderURI, dirtyStatus, name, indexingPriority \
       FROM folderLocations",
      true
    );

    while (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      let folderID = stmt.getInt64(0);
      let folderURI = stmt.getString(1);
      let dirtyStatus = stmt.getInt32(2);
      let folderName = stmt.getString(3);
      let indexingPriority = stmt.getInt32(4);

      let folder = new GlodaFolder(
        this,
        folderID,
        folderURI,
        dirtyStatus,
        folderName,
        indexingPriority
      );

      this._folderByURI[folderURI] = folder;
      this._folderByID[folderID] = folder;

      // keep the id allocator ahead of every id we have already handed out
      if (folderID >= this._nextFolderId) {
        this._nextFolderId = folderID + 1;
      }
    }
    stmt.finalize();
  },

  /** Is this nsIMsgFolder already mapped to a GlodaFolder? */
  _folderKnown(aFolder) {
    let folderURI = aFolder.URI;
    return folderURI in this._folderByURI;
  },

  /** Is this gloda folder id already mapped to a GlodaFolder? */
  _folderIdKnown(aFolderID) {
    return aFolderID in this._folderByID;
  },

  /**
   * Return the default indexing priority for a folder of this type, based
   * on the folder's flags. If aAllowSpecialFolderIndexing is true, then
   * folders such as Trash and Junk will be indexed.
   *
   * @param {nsIMsgFolder} aFolder
   * @param {boolean} aAllowSpecialFolderIndexing
   * @returns {Number}
   */
  getDefaultIndexingPriority(aFolder, aAllowSpecialFolderIndexing) {
    let indexingPriority = GlodaFolder.prototype.kIndexingDefaultPriority;
    // Do not walk into trash/junk folders, unless the user is explicitly
    // telling us to do so.
    let specialFolderFlags =
      Ci.nsMsgFolderFlags.Trash | Ci.nsMsgFolderFlags.Junk;
    if (aFolder.isSpecialFolder(specialFolderFlags, true)) {
      indexingPriority = aAllowSpecialFolderIndexing
        ? GlodaFolder.prototype.kIndexingDefaultPriority
        : GlodaFolder.prototype.kIndexingNeverPriority;
    } else if (
      aFolder.flags &
      (Ci.nsMsgFolderFlags.Queue | Ci.nsMsgFolderFlags.Newsgroup)
      // In unit testing at least folders can be
      // confusingly labeled ImapPublic when they
      // should not be. Or at least I don't think they
      // should be. So they're legit for now.
      // | Ci.nsMsgFolderFlags.ImapPublic
      // | Ci.nsMsgFolderFlags.ImapOtherUser
    ) {
      // Queue folders should always be ignored just because messages should not
      // spend much time in there.
      // We hate newsgroups, and public IMAP folders are similar.
      // Other user IMAP folders should be ignored because it's not this user's
      // mail.
      indexingPriority = GlodaFolder.prototype.kIndexingNeverPriority;
    } else if (aFolder.flags & Ci.nsMsgFolderFlags.Inbox) {
      indexingPriority = GlodaFolder.prototype.kIndexingInboxPriority;
    } else if (aFolder.flags & Ci.nsMsgFolderFlags.SentMail) {
      indexingPriority = GlodaFolder.prototype.kIndexingSentMailPriority;
    } else if (aFolder.flags & Ci.nsMsgFolderFlags.Favorite) {
      indexingPriority = GlodaFolder.prototype.kIndexingFavoritePriority;
    } else if (aFolder.flags & Ci.nsMsgFolderFlags.CheckNew) {
      indexingPriority = GlodaFolder.prototype.kIndexingCheckNewPriority;
    }

    return indexingPriority;
  },

  /**
   * Map a folder URI to a GlodaFolder instance, creating the mapping if it does
   * not yet exist.
   *
   * @param aFolder The nsIMsgFolder instance you would like the GlodaFolder
   *     instance for.
   * @returns The existing or newly created GlodaFolder instance.
2221 */ 2222 _mapFolder(aFolder) { 2223 let folderURI = aFolder.URI; 2224 if (folderURI in this._folderByURI) { 2225 return this._folderByURI[folderURI]; 2226 } 2227 2228 let folderID = this._nextFolderId++; 2229 2230 // if there's an indexingPriority stored on the folder, just use that 2231 let indexingPriority; 2232 let stringPrio = aFolder.getStringProperty("indexingPriority"); 2233 if (stringPrio.length) { 2234 indexingPriority = parseInt(stringPrio); 2235 } else { 2236 // Otherwise, fall back to the default for folders of this type. 2237 indexingPriority = this.getDefaultIndexingPriority(aFolder); 2238 } 2239 2240 // If there are messages in the folder, it is filthy. If there are no 2241 // messages, it can be clean. 2242 let dirtyStatus = aFolder.getTotalMessages(false) 2243 ? GlodaFolder.prototype.kFolderFilthy 2244 : GlodaFolder.prototype.kFolderClean; 2245 let folder = new GlodaFolder( 2246 this, 2247 folderID, 2248 folderURI, 2249 dirtyStatus, 2250 aFolder.prettyName, 2251 indexingPriority 2252 ); 2253 2254 this._insertFolderLocationStatement.bindByIndex(0, folder.id); 2255 this._insertFolderLocationStatement.bindByIndex(1, folder.uri); 2256 this._insertFolderLocationStatement.bindByIndex(2, folder.dirtyStatus); 2257 this._insertFolderLocationStatement.bindByIndex(3, folder.name); 2258 this._insertFolderLocationStatement.bindByIndex(4, folder.indexingPriority); 2259 this._insertFolderLocationStatement.executeAsync(this.trackAsync()); 2260 2261 this._folderByURI[folderURI] = folder; 2262 this._folderByID[folderID] = folder; 2263 this._log.debug("!! mapped " + folder.id + " from " + folderURI); 2264 return folder; 2265 }, 2266 2267 /** 2268 * Map an integer gloda folder ID to the corresponding GlodaFolder instance. 2269 * 2270 * @param aFolderID The known valid gloda folder ID for which you would like 2271 * a GlodaFolder instance. 2272 * @return The GlodaFolder instance with the given id. If no such instance 2273 * exists, we will throw an exception. 
2274 */ 2275 _mapFolderID(aFolderID) { 2276 if (aFolderID === null) { 2277 return null; 2278 } 2279 if (aFolderID in this._folderByID) { 2280 return this._folderByID[aFolderID]; 2281 } 2282 throw new Error("Got impossible folder ID: " + aFolderID); 2283 }, 2284 2285 /** 2286 * Mark the gloda folder as deleted for any outstanding references to it and 2287 * remove it from our tables so we don't hand out any new references. The 2288 * latter is especially important in the case a folder with the same name 2289 * is created afterwards; we don't want to confuse the new one with the old 2290 * one! 2291 */ 2292 _killGlodaFolderIntoTombstone(aGlodaFolder) { 2293 aGlodaFolder._deleted = true; 2294 delete this._folderByURI[aGlodaFolder.uri]; 2295 delete this._folderByID[aGlodaFolder.id]; 2296 }, 2297 2298 get _updateFolderDirtyStatusStatement() { 2299 let statement = this._createAsyncStatement( 2300 "UPDATE folderLocations SET dirtyStatus = ?1 \ 2301 WHERE id = ?2" 2302 ); 2303 this.__defineGetter__("_updateFolderDirtyStatusStatement", () => statement); 2304 return this._updateFolderDirtyStatusStatement; 2305 }, 2306 2307 updateFolderDirtyStatus(aFolder) { 2308 let ufds = this._updateFolderDirtyStatusStatement; 2309 ufds.bindByIndex(1, aFolder.id); 2310 ufds.bindByIndex(0, aFolder.dirtyStatus); 2311 ufds.executeAsync(this.trackAsync()); 2312 }, 2313 2314 get _updateFolderIndexingPriorityStatement() { 2315 let statement = this._createAsyncStatement( 2316 "UPDATE folderLocations SET indexingPriority = ?1 \ 2317 WHERE id = ?2" 2318 ); 2319 this.__defineGetter__( 2320 "_updateFolderIndexingPriorityStatement", 2321 () => statement 2322 ); 2323 return this._updateFolderIndexingPriorityStatement; 2324 }, 2325 2326 updateFolderIndexingPriority(aFolder) { 2327 let ufip = this._updateFolderIndexingPriorityStatement; 2328 ufip.bindByIndex(1, aFolder.id); 2329 ufip.bindByIndex(0, aFolder.indexingPriority); 2330 ufip.executeAsync(this.trackAsync()); 2331 }, 2332 2333 get 
_updateFolderLocationStatement() { 2334 let statement = this._createAsyncStatement( 2335 "UPDATE folderLocations SET folderURI = ?1 \ 2336 WHERE id = ?2" 2337 ); 2338 this.__defineGetter__("_updateFolderLocationStatement", () => statement); 2339 return this._updateFolderLocationStatement; 2340 }, 2341 2342 /** 2343 * Non-recursive asynchronous folder renaming based on the URI. 2344 * 2345 * @TODO provide a mechanism for recursive folder renames or have a higher 2346 * layer deal with it and remove this note. 2347 */ 2348 renameFolder(aOldFolder, aNewURI) { 2349 if (!(aOldFolder.URI in this._folderByURI)) { 2350 return; 2351 } 2352 let folder = this._mapFolder(aOldFolder); // ensure the folder is mapped 2353 let oldURI = folder.uri; 2354 this._folderByURI[aNewURI] = folder; 2355 folder._uri = aNewURI; 2356 this._log.info("renaming folder URI " + oldURI + " to " + aNewURI); 2357 this._updateFolderLocationStatement.bindByIndex(1, folder.id); 2358 this._updateFolderLocationStatement.bindByIndex(0, aNewURI); 2359 this._updateFolderLocationStatement.executeAsync(this.trackAsync()); 2360 2361 delete this._folderByURI[oldURI]; 2362 }, 2363 2364 get _deleteFolderByIDStatement() { 2365 let statement = this._createAsyncStatement( 2366 "DELETE FROM folderLocations WHERE id = ?1" 2367 ); 2368 this.__defineGetter__("_deleteFolderByIDStatement", () => statement); 2369 return this._deleteFolderByIDStatement; 2370 }, 2371 2372 deleteFolderByID(aFolderID) { 2373 let dfbis = this._deleteFolderByIDStatement; 2374 dfbis.bindByIndex(0, aFolderID); 2375 dfbis.executeAsync(this.trackAsync()); 2376 }, 2377 2378 /** 2379 * This timer drives our folder cleanup logic that is in charge of dropping 2380 * our folder references and more importantly the folder's msgDatabase 2381 * reference, but only if they are no longer in use. 2382 * This timer is only active when we have one or more live gloda folders (as 2383 * tracked by _liveGlodaFolders). 
Although we choose our timer interval to
   * be power-friendly, it doesn't really matter because unless the user or the
   * indexing process is actively doing things, all of the folders will 'die'
   * and so we will stop scheduling the timer.
   */
  _folderCleanupTimer: null,

  /**
   * When true, we have a folder cleanup timer event active.
   */
  _folderCleanupActive: false,

  /**
   * Interval at which we call the folder cleanup code, in milliseconds.
   */
  _folderCleanupTimerInterval: 2000,

  /**
   * Maps the id of 'live' GlodaFolders to the instances. If a GlodaFolder is
   * in here, it means that it has a reference to its nsIMsgDBFolder which
   * should have an open nsIMsgDatabase that we will need to close. This does
   * not count folders that are being indexed unless they have also been used
   * for header retrieval.
   */
  _liveGlodaFolders: {},

  /**
   * Mark a GlodaFolder as having a live reference to its nsIMsgFolder with an
   * implied opened associated message database. GlodaFolder calls this when
   * it first acquires its reference. It is removed from the list of live
   * folders only when our timer check calls the GlodaFolder's
   * forgetFolderIfUnused method and that method returns true.
   */
  markFolderLive(aGlodaFolder) {
    this._liveGlodaFolders[aGlodaFolder.id] = aGlodaFolder;
    // lazily (re)start the cleanup heartbeat when the first folder goes live
    if (!this._folderCleanupActive) {
      this._folderCleanupTimer.initWithCallback(
        this._performFolderCleanup,
        this._folderCleanupTimerInterval,
        Ci.nsITimer.TYPE_REPEATING_SLACK
      );
      this._folderCleanupActive = true;
    }
  },

  /**
   * Timer-driven folder cleanup logic. For every live folder tracked in
   * _liveGlodaFolders, we call their forgetFolderIfUnused method each time
   * until they return true indicating they have cleaned themselves up.
   * This method is called without a 'this' context!
   */
  _performFolderCleanup() {
    // we only need to keep going if there is at least one folder in the table
    // that is still alive after this pass.
    let keepGoing = false;
    for (let id in GlodaDatastore._liveGlodaFolders) {
      let glodaFolder = GlodaDatastore._liveGlodaFolders[id];
      // returns true if it is now 'dead' and doesn't need this heartbeat check
      if (glodaFolder.forgetFolderIfUnused()) {
        delete GlodaDatastore._liveGlodaFolders[glodaFolder.id];
      } else {
        keepGoing = true;
      }
    }

    if (!keepGoing) {
      GlodaDatastore._folderCleanupTimer.cancel();
      GlodaDatastore._folderCleanupActive = false;
    }
  },

  /* ********** Conversation ********** */
  /** The next conversation id to allocate. Initialize at startup. */
  _nextConversationId: 1,

  /**
   * Synchronously seed _nextConversationId from the highest id currently in
   * the conversations table; called during startup.
   */
  _populateConversationManagedId() {
    let stmt = this._createSyncStatement(
      "SELECT MAX(id) FROM conversations",
      true
    );
    if (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      this._nextConversationId = stmt.getInt64(0) + 1;
    }
    stmt.finalize();
  },

  /** Self-memoizing INSERT statement for conversations rows. */
  get _insertConversationStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO conversations (id, subject, oldestMessageDate, \
       newestMessageDate) \
       VALUES (?1, ?2, ?3, ?4)"
    );
    this.__defineGetter__("_insertConversationStatement", () => statement);
    return this._insertConversationStatement;
  },

  /** Self-memoizing INSERT statement for conversationsText rows. */
  get _insertConversationTextStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO conversationsText (docid, subject) \
       VALUES (?1, ?2)"
    );
    this.__defineGetter__("_insertConversationTextStatement", () => statement);
    return this._insertConversationTextStatement;
  },

  /**
   * Asynchronously create a conversation.
 */
  createConversation(aSubject, aOldestMessageDate, aNewestMessageDate) {
    // create the data row
    let conversationID = this._nextConversationId++;
    let ics = this._insertConversationStatement;
    ics.bindByIndex(0, conversationID);
    ics.bindByIndex(1, aSubject);
    if (aOldestMessageDate == null) {
      ics.bindByIndex(2, null);
    } else {
      ics.bindByIndex(2, aOldestMessageDate);
    }
    if (aNewestMessageDate == null) {
      ics.bindByIndex(3, null);
    } else {
      ics.bindByIndex(3, aNewestMessageDate);
    }
    ics.executeAsync(this.trackAsync());

    // create the fulltext row, using the same rowid/docid
    let icts = this._insertConversationTextStatement;
    icts.bindByIndex(0, conversationID);
    icts.bindByIndex(1, aSubject);
    icts.executeAsync(this.trackAsync());

    // create it
    let conversation = new GlodaConversation(
      this,
      conversationID,
      aSubject,
      aOldestMessageDate,
      aNewestMessageDate
    );
    // it's new! let the collection manager know about it.
    GlodaCollectionManager.itemsAdded(conversation.NOUN_ID, [conversation]);
    // return it
    return conversation;
  },

  /** Self-memoizing DELETE statement for a conversations row by id. */
  get _deleteConversationByIDStatement() {
    let statement = this._createAsyncStatement(
      "DELETE FROM conversations WHERE id = ?1"
    );
    this.__defineGetter__("_deleteConversationByIDStatement", () => statement);
    return this._deleteConversationByIDStatement;
  },

  /**
   * Asynchronously delete a conversation given its ID.
   */
  deleteConversationByID(aConversationID) {
    let dcbids = this._deleteConversationByIDStatement;
    dcbids.bindByIndex(0, aConversationID);
    dcbids.executeAsync(this.trackAsync());

    GlodaCollectionManager.itemsDeleted(GlodaConversation.prototype.NOUN_ID, [
      aConversationID,
    ]);
  },

  /** Construct a GlodaConversation from a SELECT result row. */
  _conversationFromRow(aStmt) {
    let oldestMessageDate, newestMessageDate;
    // dates are nullable columns, so check for NULL before fetching int64s
    if (aStmt.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      oldestMessageDate = null;
    } else {
      oldestMessageDate = aStmt.getInt64(2);
    }
    if (aStmt.getTypeOfIndex(3) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) {
      newestMessageDate = null;
    } else {
      newestMessageDate = aStmt.getInt64(3);
    }
    return new GlodaConversation(
      this,
      aStmt.getInt64(0),
      aStmt.getString(1),
      oldestMessageDate,
      newestMessageDate
    );
  },

  /* ********** Message ********** */
  /**
   * Next message id, managed because of our use of asynchronous inserts.
   * Initialized by _populateMessageManagedId called by _init.
   *
   * Start from 32 to leave us all kinds of magical sentinel values at the
   * bottom.
   */
  _nextMessageId: 32,

  /**
   * Synchronously seed _nextMessageId from the highest id currently in the
   * messages table; called during startup.
   */
  _populateMessageManagedId() {
    let stmt = this._createSyncStatement("SELECT MAX(id) FROM messages", true);
    if (stmt.executeStep()) {
      // no chance of this SQLITE_BUSY on this call
      // 0 gets returned even if there are no messages...
      let highestSeen = stmt.getInt64(0);
      if (highestSeen != 0) {
        this._nextMessageId = highestSeen + 1;
      }
    }
    stmt.finalize();
  },

  /** Self-memoizing INSERT statement for messages rows. */
  get _insertMessageStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO messages (id, folderID, messageKey, conversationID, date, \
       headerMessageID, jsonAttributes, notability) \
       VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"
    );
    this.__defineGetter__("_insertMessageStatement", () => statement);
    return this._insertMessageStatement;
  },

  /** Self-memoizing INSERT statement for messagesText full-text rows. */
  get _insertMessageTextStatement() {
    let statement = this._createAsyncStatement(
      "INSERT INTO messagesText (docid, subject, body, attachmentNames, \
       author, recipients) \
       VALUES (?1, ?2, ?3, ?4, ?5, ?6)"
    );
    this.__defineGetter__("_insertMessageTextStatement", () => statement);
    return this._insertMessageTextStatement;
  },

  /**
   * Create a GlodaMessage with the given properties. Because this is only half
   * of the process of creating a message (the attributes still need to be
   * completed), it's on the caller's head to call GlodaCollectionManager's
   * itemAdded method once the message is fully created.
   *
   * This method uses the async connection, any downstream logic that depends on
   * this message actually existing in the database must be done using an
   * async query.
   */
  createMessage(
    aFolder,
    aMessageKey,
    aConversationID,
    aDatePRTime,
    aHeaderMessageID
  ) {
    let folderID;
    if (aFolder != null) {
      folderID = this._mapFolder(aFolder).id;
    } else {
      folderID = null;
    }

    let messageID = this._nextMessageId++;

    let message = new GlodaMessage(
      this,
      messageID,
      folderID,
      aMessageKey,
      aConversationID,
      /* conversation */ null,
      aDatePRTime ? new Date(aDatePRTime / 1000) : null,
      aHeaderMessageID,
      /* deleted */ false,
      /* jsonText */ undefined,
      /* notability*/ 0
    );

    // We would love to notify the collection manager about the message at this
    // point (at least if it's not a ghost), but we can't yet. We need to wait
    // until the attributes have been indexed, which means it's out of our
    // hands. (Gloda.processMessage does it.)

    return message;
  },

  /** Asynchronously insert the message row (and full-text row if non-ghost). */
  insertMessage(aMessage) {
    let ims = this._insertMessageStatement;
    ims.bindByIndex(0, aMessage.id);
    if (aMessage.folderID == null) {
      ims.bindByIndex(1, null);
    } else {
      ims.bindByIndex(1, aMessage.folderID);
    }
    if (aMessage.messageKey == null) {
      ims.bindByIndex(2, null);
    } else {
      ims.bindByIndex(2, aMessage.messageKey);
    }
    ims.bindByIndex(3, aMessage.conversationID);
    if (aMessage.date == null) {
      ims.bindByIndex(4, null);
    } else {
      // stored scaled by 1000 relative to the JS date value
      ims.bindByIndex(4, aMessage.date * 1000);
    }
    ims.bindByIndex(5, aMessage.headerMessageID);
    if (aMessage._jsonText) {
      ims.bindByIndex(6, aMessage._jsonText);
    } else {
      ims.bindByIndex(6, null);
    }
    ims.bindByIndex(7, aMessage.notability);

    try {
      ims.executeAsync(this.trackAsync());
    } catch (ex) {
      throw new Error(
        "error executing statement... " +
          this.asyncConnection.lastError +
          ": " +
          this.asyncConnection.lastErrorString +
          " - " +
          ex
      );
    }

    // we create the full-text row for any message that isn't a ghost,
    // whether we have the body or not
    if (aMessage.folderID !== null) {
      this._insertMessageText(aMessage);
    }
  },

  /**
   * Inserts a full-text row. This should only be called if you're sure you want
   * to insert a row into the table.
   */
  _insertMessageText(aMessage) {
    if (aMessage._content && aMessage._content.hasContent()) {
      aMessage._indexedBodyText = aMessage._content.getContentString(true);
    } else if (aMessage._bodyLines) {
      aMessage._indexedBodyText = aMessage._bodyLines.join("\n");
    } else {
      aMessage._indexedBodyText = null;
    }

    let imts = this._insertMessageTextStatement;
    imts.bindByIndex(0, aMessage.id);
    imts.bindByIndex(1, aMessage._subject);
    if (aMessage._indexedBodyText == null) {
      imts.bindByIndex(2, null);
    } else {
      imts.bindByIndex(2, aMessage._indexedBodyText);
    }
    if (aMessage._attachmentNames === null) {
      imts.bindByIndex(3, null);
    } else {
      imts.bindByIndex(3, aMessage._attachmentNames.join("\n"));
    }

    // if (aMessage._indexAuthor)
    imts.bindByIndex(4, aMessage._indexAuthor);
    // if (aMessage._indexRecipients)
    imts.bindByIndex(5, aMessage._indexRecipients);

    try {
      imts.executeAsync(this.trackAsync());
    } catch (ex) {
      throw new Error(
        "error executing fulltext statement... " +
          this.asyncConnection.lastError +
          ": " +
          this.asyncConnection.lastErrorString +
          " - " +
          ex
      );
    }
  },

  /** Self-memoizing UPDATE statement for messages rows. */
  get _updateMessageStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messages SET folderID = ?1, \
       messageKey = ?2, \
       conversationID = ?3, \
       date = ?4, \
       headerMessageID = ?5, \
       jsonAttributes = ?6, \
       notability = ?7, \
       deleted = ?8 \
       WHERE id = ?9"
    );
    this.__defineGetter__("_updateMessageStatement", () => statement);
    return this._updateMessageStatement;
  },

  /** Self-memoizing UPDATE statement for messagesText rows. */
  get _updateMessageTextStatement() {
    let statement = this._createAsyncStatement(
      "UPDATE messagesText SET body = ?1, \
       attachmentNames = ?2 \
       WHERE docid = ?3"
    );

    this.__defineGetter__("_updateMessageTextStatement", () => statement);
    return this._updateMessageTextStatement;
  },

  /**
   * Update the database row associated with the message. If the message is
   * not a ghost and has _isNew defined, messagesText is affected.
   *
   * aMessage._isNew is currently equivalent to the fact that there is no
   * full-text row associated with this message, and we work with this
   * assumption here. Note that if aMessage._isNew is not defined, then
   * we don't do anything.
2791 */ 2792 updateMessage(aMessage) { 2793 let ums = this._updateMessageStatement; 2794 ums.bindByIndex(8, aMessage.id); 2795 if (aMessage.folderID === null) { 2796 ums.bindByIndex(0, null); 2797 } else { 2798 ums.bindByIndex(0, aMessage.folderID); 2799 } 2800 if (aMessage.messageKey === null) { 2801 ums.bindByIndex(1, null); 2802 } else { 2803 ums.bindByIndex(1, aMessage.messageKey); 2804 } 2805 ums.bindByIndex(2, aMessage.conversationID); 2806 if (aMessage.date === null) { 2807 ums.bindByIndex(3, null); 2808 } else { 2809 ums.bindByIndex(3, aMessage.date * 1000); 2810 } 2811 ums.bindByIndex(4, aMessage.headerMessageID); 2812 if (aMessage._jsonText) { 2813 ums.bindByIndex(5, aMessage._jsonText); 2814 } else { 2815 ums.bindByIndex(5, null); 2816 } 2817 ums.bindByIndex(6, aMessage.notability); 2818 ums.bindByIndex(7, aMessage._isDeleted ? 1 : 0); 2819 2820 ums.executeAsync(this.trackAsync()); 2821 2822 if (aMessage.folderID !== null) { 2823 if ("_isNew" in aMessage && aMessage._isNew === true) { 2824 this._insertMessageText(aMessage); 2825 } else { 2826 this._updateMessageText(aMessage); 2827 } 2828 } 2829 }, 2830 2831 /** 2832 * Updates the full-text row associated with this message. This only performs 2833 * the UPDATE query if the indexed body text has changed, which means that if 2834 * the body hasn't changed but the attachments have, we don't update. 
2835 */ 2836 _updateMessageText(aMessage) { 2837 let newIndexedBodyText; 2838 if (aMessage._content && aMessage._content.hasContent()) { 2839 newIndexedBodyText = aMessage._content.getContentString(true); 2840 } else if (aMessage._bodyLines) { 2841 newIndexedBodyText = aMessage._bodyLines.join("\n"); 2842 } else { 2843 newIndexedBodyText = null; 2844 } 2845 2846 // If the body text matches, don't perform an update 2847 if (newIndexedBodyText == aMessage._indexedBodyText) { 2848 this._log.debug( 2849 "in _updateMessageText, skipping update because body matches" 2850 ); 2851 return; 2852 } 2853 2854 aMessage._indexedBodyText = newIndexedBodyText; 2855 let umts = this._updateMessageTextStatement; 2856 umts.bindByIndex(2, aMessage.id); 2857 2858 if (aMessage._indexedBodyText == null) { 2859 umts.bindByIndex(0, null); 2860 } else { 2861 umts.bindByIndex(0, aMessage._indexedBodyText); 2862 } 2863 2864 if (aMessage._attachmentNames == null) { 2865 umts.bindByIndex(1, null); 2866 } else { 2867 umts.bindByIndex(1, aMessage._attachmentNames.join("\n")); 2868 } 2869 2870 try { 2871 umts.executeAsync(this.trackAsync()); 2872 } catch (ex) { 2873 throw new Error( 2874 "error executing fulltext statement... " + 2875 this.asyncConnection.lastError + 2876 ": " + 2877 this.asyncConnection.lastErrorString + 2878 " - " + 2879 ex 2880 ); 2881 } 2882 }, 2883 2884 get _updateMessageLocationStatement() { 2885 let statement = this._createAsyncStatement( 2886 "UPDATE messages SET folderID = ?1, messageKey = ?2 WHERE id = ?3" 2887 ); 2888 this.__defineGetter__("_updateMessageLocationStatement", () => statement); 2889 return this._updateMessageLocationStatement; 2890 }, 2891 2892 /** 2893 * Given a list of gloda message ids, and a list of their new message keys in 2894 * the given new folder location, asynchronously update the message's 2895 * database locations. Also, update the in-memory representations. 
2896 */ 2897 updateMessageLocations( 2898 aMessageIds, 2899 aNewMessageKeys, 2900 aDestFolder, 2901 aDoNotNotify 2902 ) { 2903 let statement = this._updateMessageLocationStatement; 2904 let destFolderID = 2905 typeof aDestFolder == "number" 2906 ? aDestFolder 2907 : this._mapFolder(aDestFolder).id; 2908 2909 // map gloda id to the new message key for in-memory rep transform below 2910 let cacheLookupMap = {}; 2911 2912 for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) { 2913 let id = aMessageIds[iMsg], 2914 msgKey = aNewMessageKeys[iMsg]; 2915 statement.bindByIndex(0, destFolderID); 2916 statement.bindByIndex(1, msgKey); 2917 statement.bindByIndex(2, id); 2918 statement.executeAsync(this.trackAsync()); 2919 2920 cacheLookupMap[id] = msgKey; 2921 } 2922 2923 // - perform the cache lookup so we can update in-memory representations 2924 // found in memory items, and converted to list form for notification 2925 let inMemoryItems = {}, 2926 modifiedItems = []; 2927 GlodaCollectionManager.cacheLookupMany( 2928 GlodaMessage.prototype.NOUN_ID, 2929 cacheLookupMap, 2930 inMemoryItems, 2931 /* do not cache */ false 2932 ); 2933 for (let glodaId in inMemoryItems) { 2934 let glodaMsg = inMemoryItems[glodaId]; 2935 glodaMsg._folderID = destFolderID; 2936 glodaMsg._messageKey = cacheLookupMap[glodaId]; 2937 modifiedItems.push(glodaMsg); 2938 } 2939 2940 // tell the collection manager about the modified messages so it can update 2941 // any existing views... 
2942 if (!aDoNotNotify && modifiedItems.length) { 2943 GlodaCollectionManager.itemsModified( 2944 GlodaMessage.prototype.NOUN_ID, 2945 modifiedItems 2946 ); 2947 } 2948 }, 2949 2950 get _updateMessageKeyStatement() { 2951 let statement = this._createAsyncStatement( 2952 "UPDATE messages SET messageKey = ?1 WHERE id = ?2" 2953 ); 2954 this.__defineGetter__("_updateMessageKeyStatement", () => statement); 2955 return this._updateMessageKeyStatement; 2956 }, 2957 2958 /** 2959 * Update the message keys for the gloda messages with the given id's. This 2960 * is to be used in response to msgKeyChanged notifications and is similar to 2961 * `updateMessageLocations` except that we do not update the folder and we 2962 * do not perform itemsModified notifications (because message keys are not 2963 * intended to be relevant to the gloda message abstraction). 2964 */ 2965 updateMessageKeys(aMessageIds, aNewMessageKeys) { 2966 let statement = this._updateMessageKeyStatement; 2967 2968 // map gloda id to the new message key for in-memory rep transform below 2969 let cacheLookupMap = {}; 2970 2971 for (let iMsg = 0; iMsg < aMessageIds.length; iMsg++) { 2972 let id = aMessageIds[iMsg], 2973 msgKey = aNewMessageKeys[iMsg]; 2974 statement.bindByIndex(0, msgKey); 2975 statement.bindByIndex(1, id); 2976 statement.executeAsync(this.trackAsync()); 2977 2978 cacheLookupMap[id] = msgKey; 2979 } 2980 2981 // - perform the cache lookup so we can update in-memory representations 2982 let inMemoryItems = {}; 2983 GlodaCollectionManager.cacheLookupMany( 2984 GlodaMessage.prototype.NOUN_ID, 2985 cacheLookupMap, 2986 inMemoryItems, 2987 /* do not cache */ false 2988 ); 2989 for (let glodaId in inMemoryItems) { 2990 let glodaMsg = inMemoryItems[glodaId]; 2991 glodaMsg._messageKey = cacheLookupMap[glodaId]; 2992 } 2993 }, 2994 2995 /** 2996 * Asynchronously mutate message folder id/message keys for the given 2997 * messages, indicating that we are moving them to the target folder, but 2998 * don't 
yet know their target message keys. 2999 * 3000 * Updates in-memory representations too. 3001 */ 3002 updateMessageFoldersByKeyPurging(aGlodaIds, aDestFolder) { 3003 let destFolderID = this._mapFolder(aDestFolder).id; 3004 3005 let sqlStr = 3006 "UPDATE messages SET folderID = ?1, \ 3007 messageKey = ?2 \ 3008 WHERE id IN (" + 3009 aGlodaIds.join(", ") + 3010 ")"; 3011 let statement = this._createAsyncStatement(sqlStr, true); 3012 statement.bindByIndex(0, destFolderID); 3013 statement.bindByIndex(1, null); 3014 statement.executeAsync(this.trackAsync()); 3015 statement.finalize(); 3016 3017 let cached = GlodaCollectionManager.cacheLookupManyList( 3018 GlodaMessage.prototype.NOUN_ID, 3019 aGlodaIds 3020 ); 3021 for (let id in cached) { 3022 let glodaMsg = cached[id]; 3023 glodaMsg._folderID = destFolderID; 3024 glodaMsg._messageKey = null; 3025 } 3026 }, 3027 3028 _messageFromRow(aRow) { 3029 let folderId, 3030 messageKey, 3031 date, 3032 jsonText, 3033 subject, 3034 indexedBodyText, 3035 attachmentNames; 3036 if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3037 folderId = null; 3038 } else { 3039 folderId = aRow.getInt64(1); 3040 } 3041 if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3042 messageKey = null; 3043 } else { 3044 messageKey = aRow.getInt64(2); 3045 } 3046 if (aRow.getTypeOfIndex(4) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3047 date = null; 3048 } else { 3049 date = new Date(aRow.getInt64(4) / 1000); 3050 } 3051 if (aRow.getTypeOfIndex(7) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3052 jsonText = undefined; 3053 } else { 3054 jsonText = aRow.getString(7); 3055 } 3056 // only queryFromQuery queries will have these columns 3057 if (aRow.numEntries >= 14) { 3058 if (aRow.getTypeOfIndex(10) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3059 subject = undefined; 3060 } else { 3061 subject = aRow.getString(10); 3062 } 3063 if (aRow.getTypeOfIndex(9) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 
3064 indexedBodyText = undefined; 3065 } else { 3066 indexedBodyText = aRow.getString(9); 3067 } 3068 if (aRow.getTypeOfIndex(11) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 3069 attachmentNames = null; 3070 } else { 3071 attachmentNames = aRow.getString(11); 3072 if (attachmentNames) { 3073 attachmentNames = attachmentNames.split("\n"); 3074 } else { 3075 attachmentNames = null; 3076 } 3077 } 3078 // we ignore 12, author 3079 // we ignore 13, recipients 3080 } 3081 return new GlodaMessage( 3082 this, 3083 aRow.getInt64(0), 3084 folderId, 3085 messageKey, 3086 aRow.getInt64(3), 3087 null, 3088 date, 3089 aRow.getString(5), 3090 aRow.getInt64(6), 3091 jsonText, 3092 aRow.getInt64(8), 3093 subject, 3094 indexedBodyText, 3095 attachmentNames 3096 ); 3097 }, 3098 3099 get _updateMessagesMarkDeletedByFolderID() { 3100 // When marking deleted clear the folderID and messageKey so that the 3101 // indexing process can reuse it without any location constraints. 3102 let statement = this._createAsyncStatement( 3103 "UPDATE messages SET folderID = NULL, messageKey = NULL, \ 3104 deleted = 1 WHERE folderID = ?1" 3105 ); 3106 this.__defineGetter__( 3107 "_updateMessagesMarkDeletedByFolderID", 3108 () => statement 3109 ); 3110 return this._updateMessagesMarkDeletedByFolderID; 3111 }, 3112 3113 /** 3114 * Efficiently mark all the messages in a folder as deleted. Unfortunately, 3115 * we obviously do not know the id's of the messages affected by this which 3116 * complicates in-memory updates. The options are sending out to the SQL 3117 * database for a list of the message id's or some form of in-memory 3118 * traversal. I/O costs being what they are, users having a propensity to 3119 * have folders with tens of thousands of messages, and the unlikeliness 3120 * of all of those messages being gloda-memory-resident, we go with the 3121 * in-memory traversal. 
3122 */ 3123 markMessagesDeletedByFolderID(aFolderID) { 3124 let statement = this._updateMessagesMarkDeletedByFolderID; 3125 statement.bindByIndex(0, aFolderID); 3126 statement.executeAsync(this.trackAsync()); 3127 3128 // Have the collection manager generate itemsRemoved events for any 3129 // in-memory messages in that folder. 3130 GlodaCollectionManager.itemsDeletedByAttribute( 3131 GlodaMessage.prototype.NOUN_ID, 3132 aMsg => aMsg._folderID == aFolderID 3133 ); 3134 }, 3135 3136 /** 3137 * Mark all the gloda messages as deleted blind-fire. Check if any of the 3138 * messages are known to the collection manager and update them to be deleted 3139 * along with the requisite collection notifications. 3140 */ 3141 markMessagesDeletedByIDs(aMessageIDs) { 3142 // When marking deleted clear the folderID and messageKey so that the 3143 // indexing process can reuse it without any location constraints. 3144 let sqlString = 3145 "UPDATE messages SET folderID = NULL, messageKey = NULL, " + 3146 "deleted = 1 WHERE id IN (" + 3147 aMessageIDs.join(",") + 3148 ")"; 3149 3150 let statement = this._createAsyncStatement(sqlString, true); 3151 statement.executeAsync(this.trackAsync()); 3152 statement.finalize(); 3153 3154 GlodaCollectionManager.itemsDeleted( 3155 GlodaMessage.prototype.NOUN_ID, 3156 aMessageIDs 3157 ); 3158 }, 3159 3160 get _countDeletedMessagesStatement() { 3161 let statement = this._createAsyncStatement( 3162 "SELECT COUNT(*) FROM messages WHERE deleted = 1" 3163 ); 3164 this.__defineGetter__("_countDeletedMessagesStatement", () => statement); 3165 return this._countDeletedMessagesStatement; 3166 }, 3167 3168 /** 3169 * Count how many messages are currently marked as deleted in the database. 
3170 */ 3171 countDeletedMessages(aCallback) { 3172 let cms = this._countDeletedMessagesStatement; 3173 cms.executeAsync(new SingletonResultValueHandler(aCallback)); 3174 }, 3175 3176 get _deleteMessageByIDStatement() { 3177 let statement = this._createAsyncStatement( 3178 "DELETE FROM messages WHERE id = ?1" 3179 ); 3180 this.__defineGetter__("_deleteMessageByIDStatement", () => statement); 3181 return this._deleteMessageByIDStatement; 3182 }, 3183 3184 get _deleteMessageTextByIDStatement() { 3185 let statement = this._createAsyncStatement( 3186 "DELETE FROM messagesText WHERE docid = ?1" 3187 ); 3188 this.__defineGetter__("_deleteMessageTextByIDStatement", () => statement); 3189 return this._deleteMessageTextByIDStatement; 3190 }, 3191 3192 /** 3193 * Delete a message and its fulltext from the database. It is assumed that 3194 * the message was already marked as deleted and so is not visible to the 3195 * collection manager and so nothing needs to be done about that. 3196 */ 3197 deleteMessageByID(aMessageID) { 3198 let dmbids = this._deleteMessageByIDStatement; 3199 dmbids.bindByIndex(0, aMessageID); 3200 dmbids.executeAsync(this.trackAsync()); 3201 3202 this.deleteMessageTextByID(aMessageID); 3203 }, 3204 3205 deleteMessageTextByID(aMessageID) { 3206 let dmt = this._deleteMessageTextByIDStatement; 3207 dmt.bindByIndex(0, aMessageID); 3208 dmt.executeAsync(this.trackAsync()); 3209 }, 3210 3211 get _folderCompactionStatement() { 3212 let statement = this._createAsyncStatement( 3213 "SELECT id, messageKey, headerMessageID FROM messages \ 3214 WHERE folderID = ?1 AND \ 3215 messageKey >= ?2 AND +deleted = 0 ORDER BY messageKey LIMIT ?3" 3216 ); 3217 this.__defineGetter__("_folderCompactionStatement", () => statement); 3218 return this._folderCompactionStatement; 3219 }, 3220 3221 folderCompactionPassBlockFetch( 3222 aFolderID, 3223 aStartingMessageKey, 3224 aLimit, 3225 aCallback 3226 ) { 3227 let fcs = this._folderCompactionStatement; 3228 fcs.bindByIndex(0, 
aFolderID); 3229 fcs.bindByIndex(1, aStartingMessageKey); 3230 fcs.bindByIndex(2, aLimit); 3231 fcs.executeAsync(new CompactionBlockFetcherHandler(aCallback)); 3232 }, 3233 3234 /* ********** Message Attributes ********** */ 3235 get _insertMessageAttributeStatement() { 3236 let statement = this._createAsyncStatement( 3237 "INSERT INTO messageAttributes (conversationID, messageID, attributeID, \ 3238 value) \ 3239 VALUES (?1, ?2, ?3, ?4)" 3240 ); 3241 this.__defineGetter__("_insertMessageAttributeStatement", () => statement); 3242 return this._insertMessageAttributeStatement; 3243 }, 3244 3245 get _deleteMessageAttributeStatement() { 3246 let statement = this._createAsyncStatement( 3247 "DELETE FROM messageAttributes WHERE attributeID = ?1 AND value = ?2 \ 3248 AND conversationID = ?3 AND messageID = ?4" 3249 ); 3250 this.__defineGetter__("_deleteMessageAttributeStatement", () => statement); 3251 return this._deleteMessageAttributeStatement; 3252 }, 3253 3254 /** 3255 * Insert and remove attributes relating to a GlodaMessage. This is performed 3256 * inside a pseudo-transaction (we create one if we aren't in one, using 3257 * our _beginTransaction wrapper, but if we are in one, no additional 3258 * meaningful semantics are added). 3259 * No attempt is made to verify uniqueness of inserted attributes, either 3260 * against the current database or within the provided list of attributes. 3261 * The caller is responsible for ensuring that unwanted duplicates are 3262 * avoided. 3263 * 3264 * @param aMessage The GlodaMessage the attributes belong to. This is used 3265 * to provide the message id and conversation id. 3266 * @param aAddDBAttributes A list of attribute tuples to add, where each tuple 3267 * contains an attribute ID and a value. Lest you forget, an attribute ID 3268 * corresponds to a row in the attribute definition table. The attribute 3269 * definition table stores the 'parameter' for the attribute, if any. 
3270 * (Which is to say, our frequent Attribute-Parameter-Value triple has 3271 * the Attribute-Parameter part distilled to a single attribute id.) 3272 * @param aRemoveDBAttributes A list of attribute tuples to remove. 3273 */ 3274 adjustMessageAttributes(aMessage, aAddDBAttributes, aRemoveDBAttributes) { 3275 let imas = this._insertMessageAttributeStatement; 3276 let dmas = this._deleteMessageAttributeStatement; 3277 this._beginTransaction(); 3278 try { 3279 for (let iAttrib = 0; iAttrib < aAddDBAttributes.length; iAttrib++) { 3280 let attribValueTuple = aAddDBAttributes[iAttrib]; 3281 3282 imas.bindByIndex(0, aMessage.conversationID); 3283 imas.bindByIndex(1, aMessage.id); 3284 imas.bindByIndex(2, attribValueTuple[0]); 3285 // use 0 instead of null, otherwise the db gets upset. (and we don't 3286 // really care anyways.) 3287 if (attribValueTuple[1] == null) { 3288 imas.bindByIndex(3, 0); 3289 } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) { 3290 imas.bindByIndex(3, attribValueTuple[1]); 3291 } else { 3292 imas.bindByIndex(3, attribValueTuple[1]); 3293 } 3294 imas.executeAsync(this.trackAsync()); 3295 } 3296 3297 for (let iAttrib = 0; iAttrib < aRemoveDBAttributes.length; iAttrib++) { 3298 let attribValueTuple = aRemoveDBAttributes[iAttrib]; 3299 3300 dmas.bindByIndex(0, attribValueTuple[0]); 3301 // use 0 instead of null, otherwise the db gets upset. (and we don't 3302 // really care anyways.) 
3303 if (attribValueTuple[1] == null) { 3304 dmas.bindByIndex(1, 0); 3305 } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) { 3306 dmas.bindByIndex(1, attribValueTuple[1]); 3307 } else { 3308 dmas.bindByIndex(1, attribValueTuple[1]); 3309 } 3310 dmas.bindByIndex(2, aMessage.conversationID); 3311 dmas.bindByIndex(3, aMessage.id); 3312 dmas.executeAsync(this.trackAsync()); 3313 } 3314 3315 this._commitTransaction(); 3316 } catch (ex) { 3317 this._log.error("adjustMessageAttributes:", ex); 3318 this._rollbackTransaction(); 3319 throw ex; 3320 } 3321 }, 3322 3323 get _deleteMessageAttributesByMessageIDStatement() { 3324 let statement = this._createAsyncStatement( 3325 "DELETE FROM messageAttributes WHERE messageID = ?1" 3326 ); 3327 this.__defineGetter__( 3328 "_deleteMessageAttributesByMessageIDStatement", 3329 () => statement 3330 ); 3331 return this._deleteMessageAttributesByMessageIDStatement; 3332 }, 3333 3334 /** 3335 * Clear all the message attributes for a given GlodaMessage. No changes 3336 * are made to the in-memory representation of the message; it is up to the 3337 * caller to ensure that it handles things correctly. 3338 * 3339 * @param aMessage The GlodaMessage whose database attributes should be 3340 * purged. 
3341 */ 3342 clearMessageAttributes(aMessage) { 3343 if (aMessage.id != null) { 3344 this._deleteMessageAttributesByMessageIDStatement.bindByIndex( 3345 0, 3346 aMessage.id 3347 ); 3348 this._deleteMessageAttributesByMessageIDStatement.executeAsync( 3349 this.trackAsync() 3350 ); 3351 } 3352 }, 3353 3354 _stringSQLQuoter(aString) { 3355 return "'" + aString.replace(/\'/g, "''") + "'"; 3356 }, 3357 _numberQuoter(aNum) { 3358 return aNum; 3359 }, 3360 3361 /* ===== Generic Attribute Support ===== */ 3362 adjustAttributes(aItem, aAddDBAttributes, aRemoveDBAttributes) { 3363 let nounDef = aItem.NOUN_DEF; 3364 let dbMeta = nounDef._dbMeta; 3365 if (dbMeta.insertAttrStatement === undefined) { 3366 dbMeta.insertAttrStatement = this._createAsyncStatement( 3367 "INSERT INTO " + 3368 nounDef.attrTableName + 3369 " (" + 3370 nounDef.attrIDColumnName + 3371 ", attributeID, value) " + 3372 " VALUES (?1, ?2, ?3)" 3373 ); 3374 // we always create this at the same time (right here), no need to check 3375 dbMeta.deleteAttrStatement = this._createAsyncStatement( 3376 "DELETE FROM " + 3377 nounDef.attrTableName + 3378 " WHERE " + 3379 " attributeID = ?1 AND value = ?2 AND " + 3380 nounDef.attrIDColumnName + 3381 " = ?3" 3382 ); 3383 } 3384 3385 let ias = dbMeta.insertAttrStatement; 3386 let das = dbMeta.deleteAttrStatement; 3387 this._beginTransaction(); 3388 try { 3389 for (let iAttr = 0; iAttr < aAddDBAttributes.length; iAttr++) { 3390 let attribValueTuple = aAddDBAttributes[iAttr]; 3391 3392 ias.bindByIndex(0, aItem.id); 3393 ias.bindByIndex(1, attribValueTuple[0]); 3394 // use 0 instead of null, otherwise the db gets upset. (and we don't 3395 // really care anyways.) 
3396 if (attribValueTuple[1] == null) { 3397 ias.bindByIndex(2, 0); 3398 } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) { 3399 ias.bindByIndex(2, attribValueTuple[1]); 3400 } else { 3401 ias.bindByIndex(2, attribValueTuple[1]); 3402 } 3403 ias.executeAsync(this.trackAsync()); 3404 } 3405 3406 for (let iAttr = 0; iAttr < aRemoveDBAttributes.length; iAttr++) { 3407 let attribValueTuple = aRemoveDBAttributes[iAttr]; 3408 3409 das.bindByIndex(0, attribValueTuple[0]); 3410 // use 0 instead of null, otherwise the db gets upset. (and we don't 3411 // really care anyways.) 3412 if (attribValueTuple[1] == null) { 3413 das.bindByIndex(1, 0); 3414 } else if (Math.floor(attribValueTuple[1]) == attribValueTuple[1]) { 3415 das.bindByIndex(1, attribValueTuple[1]); 3416 } else { 3417 das.bindByIndex(1, attribValueTuple[1]); 3418 } 3419 das.bindByIndex(2, aItem.id); 3420 das.executeAsync(this.trackAsync()); 3421 } 3422 3423 this._commitTransaction(); 3424 } catch (ex) { 3425 this._log.error("adjustAttributes:", ex); 3426 this._rollbackTransaction(); 3427 throw ex; 3428 } 3429 }, 3430 3431 clearAttributes(aItem) { 3432 let nounDef = aItem.NOUN_DEF; 3433 let dbMeta = nounDef._dbMeta; 3434 if (dbMeta.clearAttrStatement === undefined) { 3435 dbMeta.clearAttrStatement = this._createAsyncStatement( 3436 "DELETE FROM " + 3437 nounDef.attrTableName + 3438 " WHERE " + 3439 nounDef.attrIDColumnName + 3440 " = ?1" 3441 ); 3442 } 3443 3444 if (aItem.id != null) { 3445 dbMeta.clearAttrstatement.bindByIndex(0, aItem.id); 3446 dbMeta.clearAttrStatement.executeAsync(this.trackAsync()); 3447 } 3448 }, 3449 3450 /** 3451 * escapeStringForLIKE is only available on statements, and sometimes we want 3452 * to use it before we create our statement, so we create a statement just 3453 * for this reason. 
   */
  get _escapeLikeStatement() {
    // A trivial statement whose only purpose is exposing escapeStringForLIKE.
    let statement = this._createAsyncStatement("SELECT 0");
    this.__defineGetter__("_escapeLikeStatement", () => statement);
    return this._escapeLikeStatement;
  },

  /**
   * Convert a list of attribute values to their database representations,
   * grouped by attribute id; yields [attributeID, dbValues] pairs. For
   * parameterized attributes, runs of values sharing the same parameter are
   * grouped under the attribute id bound to that parameter (aValues is
   * presumed ordered so equal parameters are adjacent -- TODO confirm with
   * callers). Null values with emptySetIsSignificant yield the special
   * empty-set pair instead.
   */
  *_convertToDBValuesAndGroupByAttributeID(aAttrDef, aValues) {
    let objectNounDef = aAttrDef.objectNounDef;
    if (!objectNounDef.usesParameter) {
      let dbValues = [];
      for (let iValue = 0; iValue < aValues.length; iValue++) {
        let value = aValues[iValue];
        // If the empty set is significant and it's an empty signifier, emit
        // the appropriate dbvalue.
        if (value == null && aAttrDef.emptySetIsSignificant) {
          yield [this.kEmptySetAttrId, [aAttrDef.id]];
          // Bail if the only value was us; we don't want to add a
          // value-possessing wildcard into the mix.
          if (aValues.length == 1) {
            return;
          }
          continue;
        }
        let dbValue = objectNounDef.toParamAndValue(value)[1];
        if (dbValue != null) {
          dbValues.push(dbValue);
        }
      }
      // Special (table-column) attributes carry no attributeID.
      yield [aAttrDef.special ? undefined : aAttrDef.id, dbValues];
      return;
    }

    // Parameterized case: group adjacent equal parameters under one bound
    // attribute id.
    let curParam, attrID, dbValues;
    let attrDBDef = aAttrDef.dbDef;
    for (let iValue = 0; iValue < aValues.length; iValue++) {
      let value = aValues[iValue];
      // If the empty set is significant and it's an empty signifier, emit
      // the appropriate dbvalue.
      if (value == null && aAttrDef.emptySetIsSignificant) {
        yield [this.kEmptySetAttrId, [aAttrDef.id]];
        // Bail if the only value was us; we don't want to add a
        // value-possessing wildcard into the mix.
        if (aValues.length == 1) {
          return;
        }
        continue;
      }
      let [dbParam, dbValue] = objectNounDef.toParamAndValue(value);
      if (curParam === undefined) {
        // First (non-empty-signifier) value: open a group.
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        if (dbValue != null) {
          dbValues = [dbValue];
        } else {
          dbValues = [];
        }
      } else if (curParam == dbParam) {
        // Same parameter: extend the current group.
        if (dbValue != null) {
          dbValues.push(dbValue);
        }
      } else {
        // Parameter changed: flush the current group and open a new one.
        yield [attrID, dbValues];
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        if (dbValue != null) {
          dbValues = [dbValue];
        } else {
          dbValues = [];
        }
      }
    }
    // Flush the final group, if any values were seen.
    if (dbValues !== undefined) {
      yield [attrID, dbValues];
    }
  },

  /**
   * Convert a list of [lower, upper] range tuples into SQL comparison
   * strings on aValueColumnName, grouped by attribute id exactly like
   * _convertToDBValuesAndGroupByAttributeID; yields [attributeID, dbStrings]
   * pairs. One bound of each tuple may be null (open-ended range) but not
   * both.
   */
  *_convertRangesToDBStringsAndGroupByAttributeID(
    aAttrDef,
    aValues,
    aValueColumnName
  ) {
    let objectNounDef = aAttrDef.objectNounDef;
    if (!objectNounDef.usesParameter) {
      let dbStrings = [];
      for (let iValue = 0; iValue < aValues.length; iValue++) {
        let [lowerVal, upperVal] = aValues[iValue];
        // they both can't be null. that is the law.
        if (lowerVal == null) {
          dbStrings.push(
            aValueColumnName +
              " <= " +
              objectNounDef.toParamAndValue(upperVal)[1]
          );
        } else if (upperVal == null) {
          dbStrings.push(
            aValueColumnName +
              " >= " +
              objectNounDef.toParamAndValue(lowerVal)[1]
          );
        } else {
          // No one is null!
          dbStrings.push(
            aValueColumnName +
              " BETWEEN " +
              objectNounDef.toParamAndValue(lowerVal)[1] +
              " AND " +
              objectNounDef.toParamAndValue(upperVal)[1]
          );
        }
      }
      // Special (table-column) attributes carry no attributeID.
      yield [aAttrDef.special ? undefined : aAttrDef.id, dbStrings];
      return;
    }

    // Parameterized case: group adjacent equal parameters under one bound
    // attribute id.
    let curParam, attrID, dbStrings;
    let attrDBDef = aAttrDef.dbDef;
    for (let iValue = 0; iValue < aValues.length; iValue++) {
      let [lowerVal, upperVal] = aValues[iValue];

      let dbString, dbParam, lowerDBVal, upperDBVal;
      // they both can't be null. that is the law.
      if (lowerVal == null) {
        [dbParam, upperDBVal] = objectNounDef.toParamAndValue(upperVal);
        dbString = aValueColumnName + " <= " + upperDBVal;
      } else if (upperVal == null) {
        [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
        dbString = aValueColumnName + " >= " + lowerDBVal;
      } else {
        // no one is null!
        [dbParam, lowerDBVal] = objectNounDef.toParamAndValue(lowerVal);
        dbString =
          aValueColumnName +
          " BETWEEN " +
          lowerDBVal +
          " AND " +
          objectNounDef.toParamAndValue(upperVal)[1];
      }

      if (curParam === undefined) {
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        dbStrings = [dbString];
      } else if (curParam === dbParam) {
        dbStrings.push(dbString);
      } else {
        // Parameter changed: flush the current group and open a new one.
        yield [attrID, dbStrings];
        curParam = dbParam;
        attrID = attrDBDef.bindParameter(curParam);
        dbStrings = [dbString];
      }
    }
    // Flush the final group, if any ranges were seen.
    if (dbStrings !== undefined) {
      yield [attrID, dbStrings];
    }
  },

  /* eslint-disable complexity */
  /**
   * Perform a database query given a GlodaQueryClass instance that specifies
   * a set of constraints relating to the noun type associated with the query.
   * A GlodaCollection is returned containing the results of the look-up.
   * By default the collection is "live", and will mutate (generating events to
   * its listener) as the state of the database changes.
   * This functionality is made user/extension visible by the Query's
   * getCollection (asynchronous).
   *
   * @param [aArgs] See |GlodaQuery.getCollection| for info.
3622 */ 3623 queryFromQuery( 3624 aQuery, 3625 aListener, 3626 aListenerData, 3627 aExistingCollection, 3628 aMasterCollection, 3629 aArgs 3630 ) { 3631 // when changing this method, be sure that GlodaQuery's testMatch function 3632 // likewise has its changes made. 3633 let nounDef = aQuery._nounDef; 3634 3635 let whereClauses = []; 3636 let unionQueries = [aQuery].concat(aQuery._unions); 3637 let boundArgs = []; 3638 3639 // Use the dbQueryValidityConstraintSuffix to provide constraints that 3640 // filter items down to those that are valid for the query mechanism to 3641 // return. For example, in the case of messages, deleted or ghost 3642 // messages should not be returned by this query layer. We require 3643 // hand-rolled SQL to do that for now. 3644 let validityConstraintSuffix; 3645 if ( 3646 nounDef.dbQueryValidityConstraintSuffix && 3647 !aQuery.options.noDbQueryValidityConstraints 3648 ) { 3649 validityConstraintSuffix = nounDef.dbQueryValidityConstraintSuffix; 3650 } else { 3651 validityConstraintSuffix = ""; 3652 } 3653 3654 for (let iUnion = 0; iUnion < unionQueries.length; iUnion++) { 3655 let curQuery = unionQueries[iUnion]; 3656 let selects = []; 3657 3658 let lastConstraintWasSpecial = false; 3659 let curConstraintIsSpecial; 3660 3661 for ( 3662 let iConstraint = 0; 3663 iConstraint < curQuery._constraints.length; 3664 iConstraint++ 3665 ) { 3666 let constraint = curQuery._constraints[iConstraint]; 3667 let [constraintType, attrDef] = constraint; 3668 let constraintValues = constraint.slice(2); 3669 3670 let tableName, idColumnName, valueColumnName; 3671 if (constraintType == this.kConstraintIdIn) { 3672 // we don't need any of the next cases' setup code, and we especially 3673 // would prefer that attrDef isn't accessed since it's null for us. 3674 } else if (attrDef.special) { 3675 tableName = nounDef.tableName; 3676 idColumnName = "id"; // canonical id for a table is "id". 
3677 valueColumnName = attrDef.specialColumnName; 3678 curConstraintIsSpecial = true; 3679 } else { 3680 tableName = nounDef.attrTableName; 3681 idColumnName = nounDef.attrIDColumnName; 3682 valueColumnName = "value"; 3683 curConstraintIsSpecial = false; 3684 } 3685 3686 let select = null, 3687 test = null; 3688 if (constraintType === this.kConstraintIdIn) { 3689 // this is somewhat of a trick. this does mean that this can be the 3690 // only constraint. Namely, our idiom is: 3691 // SELECT * FROM blah WHERE id IN (a INTERSECT b INTERSECT c) 3692 // but if we only have 'a', then that becomes "...IN (a)", and if 3693 // 'a' is not a select but a list of id's... tricky, no? 3694 select = constraintValues.join(","); 3695 } else if (constraintType === this.kConstraintIn) { 3696 // @testpoint gloda.datastore.sqlgen.kConstraintIn 3697 let clauses = []; 3698 for (let [ 3699 attrID, 3700 values, 3701 ] of this._convertToDBValuesAndGroupByAttributeID( 3702 attrDef, 3703 constraintValues 3704 )) { 3705 let clausePart; 3706 if (attrID !== undefined) { 3707 clausePart = 3708 "(attributeID = " + attrID + (values.length ? " AND " : ""); 3709 } else { 3710 clausePart = "("; 3711 } 3712 if (values.length) { 3713 // strings need to be escaped, we would use ? binding, except 3714 // that gets mad if we have too many strings... so we use our 3715 // own escaping logic. correctly escaping is easy, but it still 3716 // feels wrong to do it. (just double the quote character...) 
3717 if ( 3718 "special" in attrDef && 3719 attrDef.special == this.kSpecialString 3720 ) { 3721 clausePart += 3722 valueColumnName + 3723 " IN (" + 3724 values 3725 .map(v => "'" + v.replace(/\'/g, "''") + "'") 3726 .join(",") + 3727 "))"; 3728 } else { 3729 clausePart += 3730 valueColumnName + " IN (" + values.join(",") + "))"; 3731 } 3732 } else { 3733 clausePart += ")"; 3734 } 3735 clauses.push(clausePart); 3736 } 3737 test = clauses.join(" OR "); 3738 } else if (constraintType === this.kConstraintRanges) { 3739 // @testpoint gloda.datastore.sqlgen.kConstraintRanges 3740 let clauses = []; 3741 for (let [ 3742 attrID, 3743 dbStrings, 3744 ] of this._convertRangesToDBStringsAndGroupByAttributeID( 3745 attrDef, 3746 constraintValues, 3747 valueColumnName 3748 )) { 3749 if (attrID !== undefined) { 3750 clauses.push( 3751 "(attributeID = " + 3752 attrID + 3753 " AND (" + 3754 dbStrings.join(" OR ") + 3755 "))" 3756 ); 3757 } else { 3758 clauses.push("(" + dbStrings.join(" OR ") + ")"); 3759 } 3760 } 3761 test = clauses.join(" OR "); 3762 } else if (constraintType === this.kConstraintEquals) { 3763 // @testpoint gloda.datastore.sqlgen.kConstraintEquals 3764 let clauses = []; 3765 for (let [ 3766 attrID, 3767 values, 3768 ] of this._convertToDBValuesAndGroupByAttributeID( 3769 attrDef, 3770 constraintValues 3771 )) { 3772 if (attrID !== undefined) { 3773 clauses.push( 3774 "(attributeID = " + 3775 attrID + 3776 " AND (" + 3777 values.map(_ => valueColumnName + " = ?").join(" OR ") + 3778 "))" 3779 ); 3780 } else { 3781 clauses.push( 3782 "(" + 3783 values.map(_ => valueColumnName + " = ?").join(" OR ") + 3784 ")" 3785 ); 3786 } 3787 boundArgs.push.apply(boundArgs, values); 3788 } 3789 test = clauses.join(" OR "); 3790 } else if (constraintType === this.kConstraintStringLike) { 3791 // @testpoint gloda.datastore.sqlgen.kConstraintStringLike 3792 let likePayload = ""; 3793 for (let valuePart of constraintValues) { 3794 if (typeof valuePart == "string") { 3795 
likePayload += this._escapeLikeStatement.escapeStringForLIKE( 3796 valuePart, 3797 "/" 3798 ); 3799 } else { 3800 likePayload += "%"; 3801 } 3802 } 3803 test = valueColumnName + " LIKE ? ESCAPE '/'"; 3804 boundArgs.push(likePayload); 3805 } else if (constraintType === this.kConstraintFulltext) { 3806 // @testpoint gloda.datastore.sqlgen.kConstraintFulltext 3807 let matchStr = constraintValues[0]; 3808 select = 3809 "SELECT docid FROM " + 3810 nounDef.tableName + 3811 "Text" + 3812 " WHERE " + 3813 attrDef.specialColumnName + 3814 " MATCH ?"; 3815 boundArgs.push(matchStr); 3816 } 3817 3818 if (curConstraintIsSpecial && lastConstraintWasSpecial && test) { 3819 selects[selects.length - 1] += " AND " + test; 3820 } else if (select) { 3821 selects.push(select); 3822 } else if (test) { 3823 select = 3824 "SELECT " + idColumnName + " FROM " + tableName + " WHERE " + test; 3825 selects.push(select); 3826 } else { 3827 this._log.warn( 3828 "Unable to translate constraint of type " + 3829 constraintType + 3830 " on attribute bound as " + 3831 nounDef.name 3832 ); 3833 } 3834 3835 lastConstraintWasSpecial = curConstraintIsSpecial; 3836 } 3837 3838 if (selects.length) { 3839 whereClauses.push( 3840 "id IN (" + 3841 selects.join(" INTERSECT ") + 3842 ")" + 3843 validityConstraintSuffix 3844 ); 3845 } 3846 } 3847 3848 let sqlString = "SELECT * FROM " + nounDef.tableName; 3849 if (!aQuery.options.noMagic) { 3850 if ( 3851 aQuery.options.noDbQueryValidityConstraints && 3852 nounDef.dbQueryJoinMagicWithNoValidityConstraints 3853 ) { 3854 sqlString += nounDef.dbQueryJoinMagicWithNoValidityConstraints; 3855 } else if (nounDef.dbQueryJoinMagic) { 3856 sqlString += nounDef.dbQueryJoinMagic; 3857 } 3858 } 3859 3860 if (whereClauses.length) { 3861 sqlString += " WHERE (" + whereClauses.join(") OR (") + ")"; 3862 } 3863 3864 if (aQuery.options.explicitSQL) { 3865 sqlString = aQuery.options.explicitSQL; 3866 } 3867 3868 if (aQuery.options.outerWrapColumns) { 3869 sqlString = 3870 "SELECT 
*, " + 3871 aQuery.options.outerWrapColumns.join(", ") + 3872 " FROM (" + 3873 sqlString + 3874 ")"; 3875 } 3876 3877 if (aQuery._order.length) { 3878 let orderClauses = []; 3879 for (let colName of aQuery._order) { 3880 if (colName.startsWith("-")) { 3881 orderClauses.push(colName.substring(1) + " DESC"); 3882 } else { 3883 orderClauses.push(colName + " ASC"); 3884 } 3885 } 3886 sqlString += " ORDER BY " + orderClauses.join(", "); 3887 } 3888 3889 if (aQuery._limit) { 3890 if (!("limitClauseAlreadyIncluded" in aQuery.options)) { 3891 sqlString += " LIMIT ?"; 3892 } 3893 boundArgs.push(aQuery._limit); 3894 } 3895 3896 this._log.debug("QUERY FROM QUERY: " + sqlString + " ARGS: " + boundArgs); 3897 3898 // if we want to become explicit, replace the query (which has already 3899 // provided our actual SQL query) with an explicit query. This will be 3900 // what gets attached to the collection in the event we create a new 3901 // collection. If we are reusing one, we assume that the explicitness, 3902 // if desired, already happened. 3903 // (we do not need to pass an argument to the explicitQueryClass constructor 3904 // because it will be passed in to the collection's constructor, which will 3905 // ensure that the collection attribute gets set.) 
3906 if (aArgs && "becomeExplicit" in aArgs && aArgs.becomeExplicit) { 3907 aQuery = new nounDef.explicitQueryClass(); 3908 } else if (aArgs && "becomeNull" in aArgs && aArgs.becomeNull) { 3909 aQuery = new nounDef.nullQueryClass(); 3910 } 3911 3912 return this._queryFromSQLString( 3913 sqlString, 3914 boundArgs, 3915 nounDef, 3916 aQuery, 3917 aListener, 3918 aListenerData, 3919 aExistingCollection, 3920 aMasterCollection 3921 ); 3922 }, 3923 /* eslint-enable complexity */ 3924 3925 _queryFromSQLString( 3926 aSqlString, 3927 aBoundArgs, 3928 aNounDef, 3929 aQuery, 3930 aListener, 3931 aListenerData, 3932 aExistingCollection, 3933 aMasterCollection 3934 ) { 3935 let statement = this._createAsyncStatement(aSqlString, true); 3936 for (let [iBinding, bindingValue] of aBoundArgs.entries()) { 3937 this._bindVariant(statement, iBinding, bindingValue); 3938 } 3939 3940 let collection; 3941 if (aExistingCollection) { 3942 collection = aExistingCollection; 3943 } else { 3944 collection = new GlodaCollection( 3945 aNounDef, 3946 [], 3947 aQuery, 3948 aListener, 3949 aMasterCollection 3950 ); 3951 GlodaCollectionManager.registerCollection(collection); 3952 // we don't want to overwrite the existing listener or its data, but this 3953 // does raise the question about what should happen if we get passed in 3954 // a different listener and/or data. 
3955 if (aListenerData !== undefined) { 3956 collection.data = aListenerData; 3957 } 3958 } 3959 if (aListenerData) { 3960 if (collection.dataStack) { 3961 collection.dataStack.push(aListenerData); 3962 } else { 3963 collection.dataStack = [aListenerData]; 3964 } 3965 } 3966 3967 statement.executeAsync( 3968 new QueryFromQueryCallback(statement, aNounDef, collection) 3969 ); 3970 statement.finalize(); 3971 return collection; 3972 }, 3973 3974 /* eslint-disable complexity */ 3975 loadNounItem(aItem, aReferencesByNounID, aInverseReferencesByNounID) { 3976 let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam; 3977 3978 let hadDeps = aItem._deps != null; 3979 let deps = aItem._deps || {}; 3980 let hasDeps = false; 3981 3982 for (let attrib of aItem.NOUN_DEF.specialLoadAttribs) { 3983 let objectNounDef = attrib.objectNounDef; 3984 3985 if ( 3986 "special" in attrib && 3987 attrib.special === this.kSpecialColumnChildren 3988 ) { 3989 let invReferences = aInverseReferencesByNounID[objectNounDef.id]; 3990 if (invReferences === undefined) { 3991 invReferences = aInverseReferencesByNounID[objectNounDef.id] = {}; 3992 } 3993 // only contribute if it's not already pending or there 3994 if ( 3995 !(attrib.id in deps) && 3996 aItem[attrib.storageAttributeName] == null 3997 ) { 3998 // this._log.debug(" Adding inv ref for: " + aItem.id); 3999 if (!(aItem.id in invReferences)) { 4000 invReferences[aItem.id] = null; 4001 } 4002 deps[attrib.id] = null; 4003 hasDeps = true; 4004 } 4005 } else if ( 4006 "special" in attrib && 4007 attrib.special === this.kSpecialColumnParent 4008 ) { 4009 let references = aReferencesByNounID[objectNounDef.id]; 4010 if (references === undefined) { 4011 references = aReferencesByNounID[objectNounDef.id] = {}; 4012 } 4013 // nothing to contribute if it's already there 4014 if ( 4015 !(attrib.id in deps) && 4016 aItem[attrib.valueStorageAttributeName] == null 4017 ) { 4018 let parentID = aItem[attrib.idStorageAttributeName]; 4019 if (!(parentID 
in references)) { 4020 references[parentID] = null; 4021 } 4022 // this._log.debug(" Adding parent ref for: " + 4023 // aItem[attrib.idStorageAttributeName]); 4024 deps[attrib.id] = null; 4025 hasDeps = true; 4026 } else { 4027 this._log.debug( 4028 " paranoia value storage: " + 4029 aItem[attrib.valueStorageAttributeName] 4030 ); 4031 } 4032 } 4033 } 4034 4035 // bail here if arbitrary values are not allowed, there just is no 4036 // encoded json, or we already had dependencies for this guy, implying 4037 // the json pass has already been performed 4038 if (!aItem.NOUN_DEF.allowsArbitraryAttrs || !aItem._jsonText || hadDeps) { 4039 if (hasDeps) { 4040 aItem._deps = deps; 4041 } 4042 return hasDeps; 4043 } 4044 4045 // this._log.debug(" load json: " + aItem._jsonText); 4046 let jsonDict = JSON.parse(aItem._jsonText); 4047 delete aItem._jsonText; 4048 4049 // Iterate over the attributes on the item 4050 for (let attribId in jsonDict) { 4051 let jsonValue = jsonDict[attribId]; 4052 // It is technically impossible for attribute ids to go away at this 4053 // point in time. This would require someone to monkey around with 4054 // our schema. But we will introduce this functionality one day, so 4055 // prepare for it now. 4056 if (!(attribId in attribIDToDBDefAndParam)) { 4057 continue; 4058 } 4059 // find the attribute definition that corresponds to this key 4060 let dbAttrib = attribIDToDBDefAndParam[attribId][0]; 4061 4062 let attrib = dbAttrib.attrDef; 4063 // The attribute definition will fail to exist if no one defines the 4064 // attribute anymore. This can happen for many reasons: an extension 4065 // was uninstalled, an extension was changed and no longer defines the 4066 // attribute, or patches are being applied/unapplied. Ignore this 4067 // attribute if missing. 
4068 if (attrib == null) { 4069 continue; 4070 } 4071 let objectNounDef = attrib.objectNounDef; 4072 4073 // If it has a tableName member but no fromJSON, then it's a persistent 4074 // object that needs to be loaded, which also means we need to hold it in 4075 // a collection owned by our collection. 4076 // (If it has a fromJSON method, then it's a special case like 4077 // MimeTypeNoun where it is authoritatively backed by a table but caches 4078 // everything into memory. There is no case where fromJSON would be 4079 // implemented but we should still be doing database lookups.) 4080 if (objectNounDef.tableName && !objectNounDef.fromJSON) { 4081 let references = aReferencesByNounID[objectNounDef.id]; 4082 if (references === undefined) { 4083 references = aReferencesByNounID[objectNounDef.id] = {}; 4084 } 4085 4086 if (attrib.singular) { 4087 if (!(jsonValue in references)) { 4088 references[jsonValue] = null; 4089 } 4090 } else { 4091 for (let key in jsonValue) { 4092 let anID = jsonValue[key]; 4093 if (!(anID in references)) { 4094 references[anID] = null; 4095 } 4096 } 4097 } 4098 4099 deps[attribId] = jsonValue; 4100 hasDeps = true; 4101 } else if (objectNounDef.contributeObjDependencies) { 4102 /* if it has custom contribution logic, use it */ 4103 if ( 4104 objectNounDef.contributeObjDependencies( 4105 jsonValue, 4106 aReferencesByNounID, 4107 aInverseReferencesByNounID 4108 ) 4109 ) { 4110 deps[attribId] = jsonValue; 4111 hasDeps = true; 4112 } else { 4113 // just propagate the value, it's some form of simple sentinel 4114 aItem[attrib.boundName] = jsonValue; 4115 } 4116 } else if (objectNounDef.fromJSON) { 4117 // otherwise, the value just needs to be de-persisted, or... 4118 if (attrib.singular) { 4119 // For consistency with the non-singular case, we don't assign the 4120 // attribute if undefined is returned. 
4121 let deserialized = objectNounDef.fromJSON(jsonValue, aItem); 4122 if (deserialized !== undefined) { 4123 aItem[attrib.boundName] = deserialized; 4124 } 4125 } else { 4126 // Convert all the entries in the list filtering out any undefined 4127 // values. (TagNoun will do this if the tag is now dead.) 4128 let outList = []; 4129 for (let key in jsonValue) { 4130 let val = jsonValue[key]; 4131 let deserialized = objectNounDef.fromJSON(val, aItem); 4132 if (deserialized !== undefined) { 4133 outList.push(deserialized); 4134 } 4135 } 4136 // Note: It's possible if we filtered things out that this is an empty 4137 // list. This is acceptable because this is somewhat of an unusual 4138 // case and I don't think we want to further complicate our 4139 // semantics. 4140 aItem[attrib.boundName] = outList; 4141 } 4142 } else { 4143 // it's fine as is 4144 aItem[attrib.boundName] = jsonValue; 4145 } 4146 } 4147 4148 if (hasDeps) { 4149 aItem._deps = deps; 4150 } 4151 return hasDeps; 4152 }, 4153 /* eslint-enable complexity */ 4154 4155 loadNounDeferredDeps(aItem, aReferencesByNounID, aInverseReferencesByNounID) { 4156 if (aItem._deps === undefined) { 4157 return; 4158 } 4159 4160 let attribIDToDBDefAndParam = this._attributeIDToDBDefAndParam; 4161 4162 for (let [attribId, jsonValue] of Object.entries(aItem._deps)) { 4163 let dbAttrib = attribIDToDBDefAndParam[attribId][0]; 4164 let attrib = dbAttrib.attrDef; 4165 4166 let objectNounDef = attrib.objectNounDef; 4167 let references = aReferencesByNounID[objectNounDef.id]; 4168 if (attrib.special) { 4169 if (attrib.special === this.kSpecialColumnChildren) { 4170 let inverseReferences = aInverseReferencesByNounID[objectNounDef.id]; 4171 // this._log.info("inverse assignment: " + objectNounDef.id + 4172 // " of " + aItem.id) 4173 aItem[attrib.storageAttributeName] = inverseReferences[aItem.id]; 4174 } else if (attrib.special === this.kSpecialColumnParent) { 4175 // this._log.info("parent column load: " + objectNounDef.id + 4176 
// " storage value: " + aItem[attrib.idStorageAttributeName]); 4177 aItem[attrib.valueStorageAttributeName] = 4178 references[aItem[attrib.idStorageAttributeName]]; 4179 } 4180 } else if (objectNounDef.tableName) { 4181 if (attrib.singular) { 4182 aItem[attrib.boundName] = references[jsonValue]; 4183 } else { 4184 aItem[attrib.boundName] = Object.keys(jsonValue).map( 4185 key => references[jsonValue[key]] 4186 ); 4187 } 4188 } else if (objectNounDef.contributeObjDependencies) { 4189 aItem[attrib.boundName] = objectNounDef.resolveObjDependencies( 4190 jsonValue, 4191 aReferencesByNounID, 4192 aInverseReferencesByNounID 4193 ); 4194 } 4195 // there is no other case 4196 } 4197 4198 delete aItem._deps; 4199 }, 4200 4201 /* ********** Contact ********** */ 4202 _nextContactId: 1, 4203 4204 _populateContactManagedId() { 4205 let stmt = this._createSyncStatement("SELECT MAX(id) FROM contacts", true); 4206 if (stmt.executeStep()) { 4207 // no chance of this SQLITE_BUSY on this call 4208 this._nextContactId = stmt.getInt64(0) + 1; 4209 } 4210 stmt.finalize(); 4211 }, 4212 4213 get _insertContactStatement() { 4214 let statement = this._createAsyncStatement( 4215 "INSERT INTO contacts (id, directoryUUID, contactUUID, name, popularity,\ 4216 frecency, jsonAttributes) \ 4217 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)" 4218 ); 4219 this.__defineGetter__("_insertContactStatement", () => statement); 4220 return this._insertContactStatement; 4221 }, 4222 4223 createContact(aDirectoryUUID, aContactUUID, aName, aPopularity, aFrecency) { 4224 let contactID = this._nextContactId++; 4225 let contact = new GlodaContact( 4226 this, 4227 contactID, 4228 aDirectoryUUID, 4229 aContactUUID, 4230 aName, 4231 aPopularity, 4232 aFrecency 4233 ); 4234 return contact; 4235 }, 4236 4237 insertContact(aContact) { 4238 let ics = this._insertContactStatement; 4239 ics.bindByIndex(0, aContact.id); 4240 if (aContact.directoryUUID == null) { 4241 ics.bindByIndex(1, null); 4242 } else { 4243 ics.bindByIndex(1, 
aContact.directoryUUID); 4244 } 4245 if (aContact.contactUUID == null) { 4246 ics.bindByIndex(2, null); 4247 } else { 4248 ics.bindByIndex(2, aContact.contactUUID); 4249 } 4250 ics.bindByIndex(3, aContact.name); 4251 ics.bindByIndex(4, aContact.popularity); 4252 ics.bindByIndex(5, aContact.frecency); 4253 if (aContact._jsonText) { 4254 ics.bindByIndex(6, aContact._jsonText); 4255 } else { 4256 ics.bindByIndex(6, null); 4257 } 4258 4259 ics.executeAsync(this.trackAsync()); 4260 4261 return aContact; 4262 }, 4263 4264 get _updateContactStatement() { 4265 let statement = this._createAsyncStatement( 4266 "UPDATE contacts SET directoryUUID = ?1, \ 4267 contactUUID = ?2, \ 4268 name = ?3, \ 4269 popularity = ?4, \ 4270 frecency = ?5, \ 4271 jsonAttributes = ?6 \ 4272 WHERE id = ?7" 4273 ); 4274 this.__defineGetter__("_updateContactStatement", () => statement); 4275 return this._updateContactStatement; 4276 }, 4277 4278 updateContact(aContact) { 4279 let ucs = this._updateContactStatement; 4280 ucs.bindByIndex(6, aContact.id); 4281 ucs.bindByIndex(0, aContact.directoryUUID); 4282 ucs.bindByIndex(1, aContact.contactUUID); 4283 ucs.bindByIndex(2, aContact.name); 4284 ucs.bindByIndex(3, aContact.popularity); 4285 ucs.bindByIndex(4, aContact.frecency); 4286 if (aContact._jsonText) { 4287 ucs.bindByIndex(5, aContact._jsonText); 4288 } else { 4289 ucs.bindByIndex(5, null); 4290 } 4291 4292 ucs.executeAsync(this.trackAsync()); 4293 }, 4294 4295 _contactFromRow(aRow) { 4296 let directoryUUID, contactUUID, jsonText; 4297 if (aRow.getTypeOfIndex(1) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 4298 directoryUUID = null; 4299 } else { 4300 directoryUUID = aRow.getString(1); 4301 } 4302 if (aRow.getTypeOfIndex(2) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 4303 contactUUID = null; 4304 } else { 4305 contactUUID = aRow.getString(2); 4306 } 4307 if (aRow.getTypeOfIndex(6) == Ci.mozIStorageValueArray.VALUE_TYPE_NULL) { 4308 jsonText = undefined; 4309 } else { 4310 jsonText = 
aRow.getString(6); 4311 } 4312 4313 return new GlodaContact( 4314 this, 4315 aRow.getInt64(0), 4316 directoryUUID, 4317 contactUUID, 4318 aRow.getString(5), 4319 aRow.getInt64(3), 4320 aRow.getInt64(4), 4321 jsonText 4322 ); 4323 }, 4324 4325 get _selectContactByIDStatement() { 4326 let statement = this._createSyncStatement( 4327 "SELECT * FROM contacts WHERE id = ?1" 4328 ); 4329 this.__defineGetter__("_selectContactByIDStatement", () => statement); 4330 return this._selectContactByIDStatement; 4331 }, 4332 4333 /** 4334 * Synchronous contact lookup currently only for use by gloda's creation 4335 * of the concept of "me". It is okay for it to be doing synchronous work 4336 * because it is part of the startup process before any user code could 4337 * have gotten a reference to Gloda, but no one else should do this. 4338 */ 4339 getContactByID(aContactID) { 4340 let contact = GlodaCollectionManager.cacheLookupOne( 4341 GlodaContact.prototype.NOUN_ID, 4342 aContactID 4343 ); 4344 4345 if (contact === null) { 4346 let scbi = this._selectContactByIDStatement; 4347 scbi.bindByIndex(0, aContactID); 4348 if (this._syncStep(scbi)) { 4349 contact = this._contactFromRow(scbi); 4350 GlodaCollectionManager.itemLoaded(contact); 4351 } 4352 scbi.reset(); 4353 } 4354 4355 return contact; 4356 }, 4357 4358 /* ********** Identity ********** */ 4359 /** next identity id, managed for async use reasons. 
*/ 4360 _nextIdentityId: 1, 4361 _populateIdentityManagedId() { 4362 let stmt = this._createSyncStatement( 4363 "SELECT MAX(id) FROM identities", 4364 true 4365 ); 4366 if (stmt.executeStep()) { 4367 // no chance of this SQLITE_BUSY on this call 4368 this._nextIdentityId = stmt.getInt64(0) + 1; 4369 } 4370 stmt.finalize(); 4371 }, 4372 4373 get _insertIdentityStatement() { 4374 let statement = this._createAsyncStatement( 4375 "INSERT INTO identities (id, contactID, kind, value, description, relay) \ 4376 VALUES (?1, ?2, ?3, ?4, ?5, ?6)" 4377 ); 4378 this.__defineGetter__("_insertIdentityStatement", () => statement); 4379 return this._insertIdentityStatement; 4380 }, 4381 4382 createIdentity(aContactID, aContact, aKind, aValue, aDescription, aIsRelay) { 4383 let identityID = this._nextIdentityId++; 4384 let iis = this._insertIdentityStatement; 4385 iis.bindByIndex(0, identityID); 4386 iis.bindByIndex(1, aContactID); 4387 iis.bindByIndex(2, aKind); 4388 iis.bindByIndex(3, aValue); 4389 iis.bindByIndex(4, aDescription); 4390 iis.bindByIndex(5, aIsRelay ? 
1 : 0); 4391 iis.executeAsync(this.trackAsync()); 4392 4393 let identity = new GlodaIdentity( 4394 this, 4395 identityID, 4396 aContactID, 4397 aContact, 4398 aKind, 4399 aValue, 4400 aDescription, 4401 aIsRelay 4402 ); 4403 GlodaCollectionManager.itemsAdded(identity.NOUN_ID, [identity]); 4404 return identity; 4405 }, 4406 4407 get _updateIdentityStatement() { 4408 let statement = this._createAsyncStatement( 4409 "UPDATE identities SET contactID = ?1, \ 4410 kind = ?2, \ 4411 value = ?3, \ 4412 description = ?4, \ 4413 relay = ?5 \ 4414 WHERE id = ?6" 4415 ); 4416 this.__defineGetter__("_updateIdentityStatement", () => statement); 4417 return this._updateIdentityStatement; 4418 }, 4419 4420 updateIdentity(aIdentity) { 4421 let ucs = this._updateIdentityStatement; 4422 ucs.bindByIndex(5, aIdentity.id); 4423 ucs.bindByIndex(0, aIdentity.contactID); 4424 ucs.bindByIndex(1, aIdentity.kind); 4425 ucs.bindByIndex(2, aIdentity.value); 4426 ucs.bindByIndex(3, aIdentity.description); 4427 ucs.bindByIndex(4, aIdentity.relay ? 1 : 0); 4428 4429 ucs.executeAsync(this.trackAsync()); 4430 }, 4431 4432 _identityFromRow(aRow) { 4433 return new GlodaIdentity( 4434 this, 4435 aRow.getInt64(0), 4436 aRow.getInt64(1), 4437 null, 4438 aRow.getString(2), 4439 aRow.getString(3), 4440 aRow.getString(4), 4441 !!aRow.getInt32(5) 4442 ); 4443 }, 4444 4445 get _selectIdentityByKindValueStatement() { 4446 let statement = this._createSyncStatement( 4447 "SELECT * FROM identities WHERE kind = ?1 AND value = ?2" 4448 ); 4449 this.__defineGetter__( 4450 "_selectIdentityByKindValueStatement", 4451 () => statement 4452 ); 4453 return this._selectIdentityByKindValueStatement; 4454 }, 4455 4456 /** 4457 * Synchronous lookup of an identity by kind and value, only for use by 4458 * the legacy gloda core code that creates a concept of "me". 
4459 * Ex: (email, foo@example.com) 4460 */ 4461 getIdentity(aKind, aValue) { 4462 let identity = GlodaCollectionManager.cacheLookupOneByUniqueValue( 4463 GlodaIdentity.prototype.NOUN_ID, 4464 aKind + "@" + aValue 4465 ); 4466 4467 let ibkv = this._selectIdentityByKindValueStatement; 4468 ibkv.bindByIndex(0, aKind); 4469 ibkv.bindByIndex(1, aValue); 4470 if (this._syncStep(ibkv)) { 4471 identity = this._identityFromRow(ibkv); 4472 GlodaCollectionManager.itemLoaded(identity); 4473 } 4474 ibkv.reset(); 4475 4476 return identity; 4477 }, 4478}; 4479GlodaAttributeDBDef.prototype._datastore = GlodaDatastore; 4480GlodaConversation.prototype._datastore = GlodaDatastore; 4481GlodaFolder.prototype._datastore = GlodaDatastore; 4482GlodaMessage.prototype._datastore = GlodaDatastore; 4483GlodaContact.prototype._datastore = GlodaDatastore; 4484GlodaIdentity.prototype._datastore = GlodaDatastore; 4485