1# 2# Gramps - a GTK+/GNOME based genealogy program 3# 4# Copyright (C) 2000-2008 Donald N. Allingham 5# Copyright (C) 2010 Nick Hall 6# Copyright (C) 2011 Tim G L Lyons 7# 8# This program is free software; you can redistribute it and/or modify 9# it under the terms of the GNU General Public License as published by 10# the Free Software Foundation; either version 2 of the License, or 11# (at your option) any later version. 12# 13# This program is distributed in the hope that it will be useful, 14# but WITHOUT ANY WARRANTY; without even the implied warranty of 15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16# GNU General Public License for more details. 17# 18# You should have received a copy of the GNU General Public License 19# along with this program; if not, write to the Free Software 20# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 21# 22 23""" 24Provide the Berkeley DB (DbBsddb) database backend for Gramps. 25This is used since Gramps version 3.0 26""" 27 28#------------------------------------------------------------------------- 29# 30# Standard python modules 31# 32#------------------------------------------------------------------------- 33import sys 34import pickle 35import os 36import time 37import bisect 38from functools import wraps 39import logging 40from sys import maxsize, getfilesystemencoding, version_info 41from ast import literal_eval as safe_eval 42 43from bsddb3 import dbshelve, db 44from bsddb3.db import DB_CREATE, DB_AUTO_COMMIT, DB_DUP, DB_DUPSORT, DB_RDONLY 45 46DBFLAGS_O = DB_CREATE | DB_AUTO_COMMIT # Default flags for database open 47DBFLAGS_R = DB_RDONLY # Flags to open a database read-only 48DBFLAGS_D = DB_DUP | DB_DUPSORT # Default flags for duplicate keys 49 50#------------------------------------------------------------------------- 51# 52# Gramps modules 53# 54#------------------------------------------------------------------------- 55from gramps.gen.lib.person import Person 56from gramps.gen.lib.family import Family 57from gramps.gen.lib.src import Source 58from gramps.gen.lib.citation import Citation 59from gramps.gen.lib.event import Event 60from gramps.gen.lib.place import Place 61from gramps.gen.lib.repo import Repository 62from gramps.gen.lib.media import Media 63from gramps.gen.lib.note import Note 64from gramps.gen.lib.tag import Tag 65from gramps.gen.lib.genderstats import GenderStats 66from gramps.gen.lib.researcher import Researcher 67 68from . import (DbBsddbRead, DbWriteBase, BSDDBTxn, 69 DbTxn, BsddbBaseCursor, BsddbDowngradeError, DbVersionError, 70 DbEnvironmentError, DbUpgradeRequiredError, find_surname, 71 find_byte_surname, find_surname_name, DbUndoBSDDB as DbUndo) 72 73from gramps.gen.db import exceptions 74from gramps.gen.db.dbconst import * 75from gramps.gen.db.utils import write_lock_file, clear_lock_file 76from gramps.gen.utils.callback import Callback 77from gramps.gen.utils.id import create_id 78from gramps.gen.updatecallback import UpdateCallback 79from gramps.gen.errors import DbError, HandleError 80from gramps.gen.const import HOME_DIR, GRAMPS_LOCALE as glocale 81_ = glocale.translation.gettext 82 83_LOG = logging.getLogger(DBLOGNAME) 84LOG = logging.getLogger(".citation") 85#_LOG.setLevel(logging.DEBUG) 86#_hdlr = logging.StreamHandler() 87#_hdlr.setFormatter(logging.Formatter(fmt="%(name)s.%(levelname)s: %(message)s")) 88#_LOG.addHandler(_hdlr) 89_MINVERSION = 9 90_DBVERSION = 19 91 92IDTRANS = "person_id" 93FIDTRANS = "family_id" 94PIDTRANS = "place_id" 95OIDTRANS = "media_id" 96EIDTRANS = "event_id" 97RIDTRANS = "repo_id" 98NIDTRANS = "note_id" 99SIDTRANS = "source_id" 100CIDTRANS = "citation_id" 101TAGTRANS = "tag_name" 102SURNAMES = "surnames" 103NAME_GROUP = "name_group" 104META = "meta_data" 105PPARENT = "place_parent" 106 107FAMILY_TBL = "family" 108PLACES_TBL = "place" 109SOURCES_TBL = "source" 110CITATIONS_TBL = "citation" 111MEDIA_TBL = "media" 112EVENTS_TBL = "event" 113PERSON_TBL = "person" 114REPO_TBL = "repo" 115NOTE_TBL = "note" 116TAG_TBL = "tag" 117 118REF_MAP = "reference_map" 119REF_PRI = "primary_map" 120REF_REF = "referenced_map" 121 122DBERRS = (db.DBRunRecoveryError, db.DBAccessError, 123 db.DBPageNotFoundError, db.DBInvalidArgError) 124 125# The following two dictionaries provide fast translation 126# between the primary class names and the keys used to reference 127# these classes in the database tables. Beware that changing 128# these maps or modifying the values of the keys will break 129# existing databases. 130 131#------------------------------------------------------------------------- 132# 133# Helper functions 134# 135#------------------------------------------------------------------------- 136 137def find_idmap(key, data): 138 """ return id for association of secondary index. 139 returns a byte string 140 """ 141 val = data[1] 142 if val is not None: 143 val = val.encode('utf-8') 144 return val 145 146def find_parent(key, data): 147 if hasattr(data[5], '__len__') and len(data[5]) > 0: 148 val = data[5][0][0] 149 else: 150 val = '' 151 return val.encode('utf-8') 152 153# Secondary database key lookups for reference_map table 154# reference_map data values are of the form: 155# ((primary_object_class_name, primary_object_handle), 156# (referenced_object_class_name, referenced_object_handle)) 157 158def find_primary_handle(key, data): 159 """ return handle for association of indexes 160 returns byte string 161 """ 162 val = (data)[0][1] 163 return val.encode('utf-8') 164 165def find_referenced_handle(key, data): 166 """ return handle for association of indexes 167 returns byte string 168 """ 169 val = (data)[1][1] 170 return val.encode('utf-8') 171 172#------------------------------------------------------------------------- 173# 174# BsddbWriteCursor 175# 176#------------------------------------------------------------------------- 177class BsddbWriteCursor(BsddbBaseCursor): 178 179 def __init__(self, source, txn=None, **kwargs): 180 BsddbBaseCursor.__init__(self, txn=txn, **kwargs) 181 self.cursor = source.db.cursor(txn) 182 self.source = source 183 184#------------------------------------------------------------------------- 185# 186# DbBsddbAssocCursor 187# 188#------------------------------------------------------------------------- 189class DbBsddbAssocCursor(BsddbBaseCursor): 190 191 def __init__(self, source, txn=None, **kwargs): 192 BsddbBaseCursor.__init__(self, txn=txn, **kwargs) 193 self.cursor = source.cursor(txn) 194 self.source = source 195 196#------------------------------------------------------------------------- 197# 198# DbBsddb 199# 200#------------------------------------------------------------------------- 201class DbBsddb(DbBsddbRead, DbWriteBase, UpdateCallback): 202 """ 203 Gramps database write access object. 204 """ 205 206 # Set up dictionary for callback signal handler 207 # --------------------------------------------- 208 # 1. Signals for primary objects 209 __signals__ = dict((obj+'-'+op, signal) 210 for obj in 211 ['person', 'family', 'event', 'place', 212 'source', 'citation', 'media', 'note', 'repository', 'tag'] 213 for op, signal in zip( 214 ['add', 'update', 'delete', 'rebuild'], 215 [(list,), (list,), (list,), None] 216 ) 217 ) 218 219 # 2. Signals for long operations 220 __signals__.update(('long-op-'+op, signal) for op, signal in zip( 221 ['start', 'heartbeat', 'end'], 222 [(object,), None, None] 223 )) 224 225 # 3. Special signal for change in home person 226 __signals__['home-person-changed'] = None 227 228 # 4. Signal for change in person group name, parameters are 229 __signals__['person-groupname-rebuild'] = (str, str) 230 231 def __init__(self): 232 """Create a new GrampsDB.""" 233 234 self.txn = None 235 DbBsddbRead.__init__(self) 236 DbWriteBase.__init__(self) 237 #UpdateCallback.__init__(self) 238 self.secondary_connected = False 239 self.has_changed = False 240 self.brief_name = None 241 self.update_env_version = False 242 self.update_python_version = False 243 self.update_pickle_version = False 244 245 def catch_db_error(func): 246 """ 247 Decorator function for catching database errors. If *func* throws 248 one of the exceptions in DBERRS, the error is logged and a DbError 249 exception is raised. 250 """ 251 @wraps(func) 252 def try_(self, *args, **kwargs): 253 try: 254 return func(self, *args, **kwargs) 255 except DBERRS as msg: 256 self.__log_error() 257 raise DbError(msg) 258 return try_ 259 260 def __open_db(self, file_name, table_name, dbtype=db.DB_HASH, flags=0): 261 dbmap = db.DB(self.env) 262 dbmap.set_flags(flags) 263 264 fname = os.path.join(file_name, table_name + DBEXT) 265 266 if self.readonly: 267 dbmap.open(fname, table_name, dbtype, DBFLAGS_R) 268 else: 269 dbmap.open(fname, table_name, dbtype, DBFLAGS_O, DBMODE) 270 return dbmap 271 272 def __open_shelf(self, file_name, table_name, dbtype=db.DB_HASH): 273 dbmap = dbshelve.DBShelf(self.env) 274 275 fname = os.path.join(file_name, table_name + DBEXT) 276 277 if self.readonly: 278 dbmap.open(fname, table_name, dbtype, DBFLAGS_R) 279 else: 280 dbmap.open(fname, table_name, dbtype, DBFLAGS_O, DBMODE) 281 return dbmap 282 283 def __log_error(self): 284 mypath = os.path.join(self.get_save_path(),DBRECOVFN) 285 with open(mypath, "w") as ofile: 286 pass 287 try: 288 clear_lock_file(self.get_save_path()) 289 except: 290 pass 291 292 _log_error = __log_error 293 294 # Override get_cursor method from the superclass to add udpate 295 # capability 296 297 @catch_db_error 298 def get_cursor(self, table, txn=None, update=False, commit=False): 299 """ Helper function to return a cursor over a table """ 300 if update and not txn: 301 txn = self.env.txn_begin(self.txn) 302 return BsddbWriteCursor(table, txn=txn or self.txn, 303 update=update, commit=commit) 304 305 # cursors for lookups in the reference_map for back reference 306 # lookups. The reference_map has three indexes: 307 # the main index: a tuple of (primary_handle, referenced_handle) 308 # the primary_handle index: the primary_handle 309 # the referenced_handle index: the referenced_handle 310 # the main index is unique, the others allow duplicate entries. 311 312 @catch_db_error 313 def _get_reference_map_primary_cursor(self): 314 """ 315 Returns a reference to a cursor over the reference map primary map 316 """ 317 return DbBsddbAssocCursor(self.reference_map_primary_map, 318 self.txn) 319 320 @catch_db_error 321 def _get_reference_map_referenced_cursor(self): 322 """ 323 Returns a reference to a cursor over the reference map referenced map 324 """ 325 return DbBsddbAssocCursor(self.reference_map_referenced_map, 326 self.txn) 327 328 @catch_db_error 329 def get_place_parent_cursor(self): 330 """ 331 Returns a reference to a cursor over the place parents 332 """ 333 return DbBsddbAssocCursor(self.parents, self.txn) 334 335 # These are overriding the DbBsddbRead's methods of saving metadata 336 # because we now have txn-capable metadata table 337 338 @catch_db_error 339 def set_default_person_handle(self, handle): 340 """Set the default Person to the passed instance.""" 341 if not self.readonly: 342 # Start transaction 343 with BSDDBTxn(self.env, self.metadata) as txn: 344 txn.put(b'default', handle) 345 self.emit('home-person-changed') 346 347 @catch_db_error 348 def get_default_person(self): 349 """Return the default Person of the database.""" 350 person_handle = self.get_default_handle() 351 if person_handle: 352 person = self.get_person_from_handle(person_handle) 353 if person: 354 return person 355 elif (self.metadata) and (not self.readonly): 356 # Start transaction 357 with BSDDBTxn(self.env, self.metadata) as txn: 358 txn.put(b'default', None) 359 return None 360 else: 361 return None 362 363 def set_mediapath(self, path): 364 """Set the default media path for database.""" 365 if self.metadata and not self.readonly: 366 # Start transaction 367 with BSDDBTxn(self.env, self.metadata) as txn: 368 txn.put(b'mediapath', path) 369 370 def __make_zip_backup(self, dirname): 371 import zipfile 372 # In Windows reserved characters is "<>:"/\|?*" 373 reserved_char = r':,<>"/\|?* ' 374 replace_char = "-__________" 375 title = self.get_dbname() 376 trans = title.maketrans(reserved_char, replace_char) 377 title = title.translate(trans) 378 379 if not os.access(dirname, os.W_OK): 380 _LOG.warning("Can't write technical DB backup for %s" % title) 381 return 382 (grampsdb_path, db_code) = os.path.split(dirname) 383 dotgramps_path = os.path.dirname(grampsdb_path) 384 zipname = title + time.strftime("_%Y-%m-%d_%H-%M-%S") + ".zip" 385 zippath = os.path.join(dotgramps_path, zipname) 386 with zipfile.ZipFile(zippath, 'w') as myzip: 387 for filename in os.listdir(dirname): 388 pathname = os.path.join(dirname, filename) 389 myzip.write(pathname, os.path.join(db_code, filename)) 390 _LOG.warning("If upgrade and loading the Family Tree works, you can " 391 "delete the zip file at %s" % 392 zippath) 393 394 def __check_bdb_version(self, name, force_bsddb_upgrade=False, 395 force_bsddb_downgrade=False): 396 """Older version of Berkeley DB can't read data created by a newer 397 version.""" 398 bdb_version = db.version() 399 versionpath = os.path.join(self.path, str(BDBVERSFN)) 400 # Compare the current version of the database (bsddb_version) with the 401 # version of the database code (env_version). If it is a downgrade, 402 # raise an exception because we can't do anything. If they are the same, 403 # return. If it is an upgrade, raise an exception unless the user has 404 # already told us we can upgrade. 405 if os.path.isfile(versionpath): 406 with open(versionpath, "r") as version_file: 407 bsddb_version = version_file.read().strip() 408 env_version = tuple(map(int, bsddb_version[1:-1].split(', '))) 409 else: 410 # bsddb version is unknown 411 bsddb_version = "Unknown" 412 env_version = "Unknown" 413# _LOG.debug("db version %s, program version %s" % (bsddb_version, bdb_version)) 414 415 if env_version == "Unknown" or \ 416 (env_version[0] < bdb_version[0]) or \ 417 (env_version[0] == bdb_version[0] and 418 env_version[1] < bdb_version[1]) or \ 419 (env_version[0] == bdb_version[0] and 420 env_version[1] == bdb_version[1] and 421 env_version[2] < bdb_version[2]): 422 # an upgrade is needed 423 if not force_bsddb_upgrade: 424 _LOG.debug("Bsddb upgrade required from %s to %s" % 425 (bsddb_version, str(bdb_version))) 426 clear_lock_file(name) 427 raise exceptions.BsddbUpgradeRequiredError(bsddb_version, 428 str(bdb_version)) 429 if not self.readonly: 430 _LOG.warning("Bsddb upgrade requested from %s to %s" % 431 (bsddb_version, str(bdb_version))) 432 self.update_env_version = True 433 # Make a backup of the database files anyway 434 self.__make_zip_backup(name) 435 elif (env_version[0] > bdb_version[0]) or \ 436 (env_version[0] == bdb_version[0] and 437 env_version[1] > bdb_version[1]): 438 clear_lock_file(name) 439 raise BsddbDowngradeError(env_version, bdb_version) 440 elif (env_version[0] == bdb_version[0] and 441 env_version[1] == bdb_version[1] and 442 env_version[2] > bdb_version[2]): 443 # A down-grade may be possible 444 if not force_bsddb_downgrade: 445 _LOG.debug("Bsddb downgrade required from %s to %s" % 446 (bsddb_version, str(bdb_version))) 447 clear_lock_file(name) 448 raise exceptions.BsddbDowngradeRequiredError(bsddb_version, 449 str(bdb_version)) 450 # Try to do a down-grade 451 if not self.readonly: 452 _LOG.warning("Bsddb downgrade requested from %s to %s" % 453 (bsddb_version, str(bdb_version))) 454 self.update_env_version = True 455 # Make a backup of the database files anyway 456 self.__make_zip_backup(name) 457 elif env_version == bdb_version: 458 # Bsddb version is OK 459 pass 460 else: 461 # This can't happen 462 raise "Comparison between Bsddb version failed" 463 464 def __check_python_version(self, name, force_python_upgrade=False): 465 """ 466 The 'pickle' format (may) change with each Python version, see 467 http://docs.python.org/3.2/library/pickle.html#pickle. Code commits 468 21777 and 21778 ensure that when going from python2 to python3, the old 469 format can be read. However, once the data has been written in the 470 python3 format, it will not be possible to go back to pyton2. This check 471 test whether we are changing python versions. If going from 2 to 3 it 472 warns the user, and allows it if he confirms. When going from 3 to 3, an 473 error is raised. Because code for python2 did not write the Python 474 version file, if the file is absent, python2 is assumed. 475 """ 476 current_python_version = version_info[0] 477 versionpath = os.path.join(self.path, "pythonversion.txt") 478 if os.path.isfile(versionpath): 479 with open(versionpath, "r") as version_file: 480 db_python_version = int(version_file.read().strip()) 481 else: 482 db_python_version = 2 483 484 if db_python_version == 3 and current_python_version == 2: 485 clear_lock_file(name) 486 raise exceptions.PythonDowngradeError(db_python_version, 487 current_python_version) 488 elif db_python_version == 2 and current_python_version > 2: 489 if not force_python_upgrade: 490 _LOG.debug("Python upgrade required from %s to %s" % 491 (db_python_version, current_python_version)) 492 clear_lock_file(name) 493 raise exceptions.PythonUpgradeRequiredError(db_python_version, 494 current_python_version) 495 # Try to do an upgrade 496 if not self.readonly: 497 _LOG.warning("Python upgrade requested from %s to %s" % 498 (db_python_version, current_python_version)) 499 self.update_python_version = True 500 # Make a backup of the database files anyway 501 self.__make_zip_backup(name) 502 elif db_python_version == 2 and current_python_version == 2: 503 pass 504 505 @catch_db_error 506 def version_supported(self): 507 dbversion = self.metadata.get(b'version', default=0) 508 return ((dbversion <= _DBVERSION) and (dbversion >= _MINVERSION)) 509 510 @catch_db_error 511 def _need_schema_upgrade(self): 512 dbversion = self.metadata.get(b'version', default=0) 513 return not self.readonly and dbversion < _DBVERSION 514 515 def __check_readonly(self, name): 516 """ 517 Return True if we don't have read/write access to the database, 518 otherwise return False (that is, we DO have read/write access) 519 """ 520 521 # See if we write to the target directory at all? 522 if not os.access(name, os.W_OK): 523 return True 524 525 # See if we lack write access to any files in the directory 526 for base in [FAMILY_TBL, PLACES_TBL, SOURCES_TBL, CITATIONS_TBL, 527 MEDIA_TBL, EVENTS_TBL, PERSON_TBL, REPO_TBL, 528 NOTE_TBL, REF_MAP, META]: 529 path = os.path.join(name, base + DBEXT) 530 if os.path.isfile(path) and not os.access(path, os.W_OK): 531 return True 532 533 # All tests passed. Inform caller that we are NOT read only 534 return False 535 536 @catch_db_error 537 def load(self, name, callback=None, mode=DBMODE_W, force_schema_upgrade=False, 538 force_bsddb_upgrade=False, force_bsddb_downgrade=False, 539 force_python_upgrade=False, update=True, 540 username=None, password=None): 541 """ 542 If update is False: then don't update any files; open read-only 543 """ 544 545 if self.__check_readonly(name): 546 mode = DBMODE_R 547 elif update: 548 write_lock_file(name) 549 else: 550 mode = DBMODE_R 551 552 if self.db_is_open: 553 self.close() 554 555 self.readonly = mode == DBMODE_R 556 #super(DbBsddbRead, self).load(name, callback, mode) 557 if callback: 558 callback(12) 559 560 # Save full path and base file name 561 self.full_name = os.path.abspath(name) 562 self.path = self.full_name 563 self.brief_name = os.path.basename(name) 564 565 # We use the existence of the person table as a proxy for the database 566 # being new 567 if not os.path.exists(os.path.join(self.path, 'person.db')): 568 self._write_version(name) 569 570 # If we re-enter load with force_python_upgrade True, then we have 571 # already checked the bsddb version, and then checked python version, 572 # and are agreeing on the upgrade 573 if not force_python_upgrade: 574 self.__check_bdb_version(name, force_bsddb_upgrade, 575 force_bsddb_downgrade) 576 577 self.__check_python_version(name, force_python_upgrade) 578 579 # Check for pickle upgrade 580 versionpath = os.path.join(self.path, str(PCKVERSFN)) 581 # Up to gramps 3.4.x PCKVERSFN was not written 582 # Gramps 4.2 incorrectly wrote PCKVERSFN = 'Yes' for Python2, so check 583 # whether python is upgraded 584 if ((not self.readonly and not self.update_pickle_version) and 585 (not os.path.isfile(versionpath) or self.update_python_version)): 586 _LOG.debug("Make backup in case there is a pickle upgrade") 587 self.__make_zip_backup(name) 588 self.update_pickle_version = True 589 590 # Check for schema upgrade 591 versionpath = os.path.join(self.path, str(SCHVERSFN)) 592 if os.path.isfile(versionpath): 593 with open(versionpath, "r") as version_file: 594 schema_version = int(version_file.read().strip()) 595 else: 596 schema_version = 0 597 if not self.readonly and schema_version < _DBVERSION and \ 598 force_schema_upgrade: 599 _LOG.debug("Make backup in case there is a schema upgrade") 600 self.__make_zip_backup(name) 601 602 # Set up database environment 603 self.env = db.DBEnv() 604 self.env.set_cachesize(0, DBCACHE) 605 606 # These env settings are only needed for Txn environment 607 self.env.set_lk_max_locks(DBLOCKS) 608 self.env.set_lk_max_objects(DBOBJECTS) 609 610 # Set to auto remove stale logs 611 self._set_auto_remove() 612 613 # Set not to flush to disk synchronous, this greatly speeds up 614 # database changes, but comes at the cause of loss of durability, so 615 # power loss might cause a need to run db recovery, see BSDDB manual 616 ## NOTE: due to pre 4.8 bsddb bug it is needed to set this flag before 617 ## open of env, #16492 - http://download.oracle.com/docs/cd/E17076_02/html/installation/changelog_4_8.html 618 self.env.set_flags(db.DB_TXN_WRITE_NOSYNC, 1) 619 620 # The DB_PRIVATE flag must go if we ever move to multi-user setup 621 env_flags = db.DB_CREATE | db.DB_PRIVATE |\ 622 db.DB_INIT_MPOOL 623 if not self.readonly: 624 env_flags |= db.DB_INIT_LOG | db.DB_INIT_TXN 625 # As opposed to before, we always try recovery on databases 626 env_flags |= db.DB_RECOVER 627 628 # Environment name is now based on the filename 629 env_name = name 630 631 try: 632 self.env.open(env_name, env_flags) 633 except Exception as msg: 634 _LOG.warning("Error opening db environment: " + str(msg)) 635 try: 636 self.__close_early() 637 except: 638 pass 639 raise DbEnvironmentError(msg) 640 641 if not self.readonly: 642 self.env.txn_checkpoint() 643 644 if callback: 645 callback(25) 646 647 # Process metadata 648 self.metadata = self.__open_shelf(self.full_name, META) 649 650 # If we cannot work with this DB version, 651 # it makes no sense to go further 652 if not self.version_supported(): 653 tree_vers = self.metadata.get(b'version', default=0) 654 self.__close_early() 655 raise DbVersionError(tree_vers, _MINVERSION, _DBVERSION) 656 657 gstats = self.metadata.get(b'gender_stats', default=None) 658 659 # Ensure version info in metadata 660 if not self.readonly: 661 # Start transaction 662 with BSDDBTxn(self.env, self.metadata) as txn: 663 if gstats is None: 664 # New database. Set up the current version. 665 #self.metadata.put(b'version', _DBVERSION, txn=the_txn) 666 txn.put(b'version', _DBVERSION) 667 txn.put(b'upgraded', 'Yes') 668 elif b'version' not in self.metadata: 669 # Not new database, but the version is missing. 670 # Use 0, but it is likely to fail anyway. 671 txn.put(b'version', 0) 672 673 self.genderStats = GenderStats(gstats) 674 675 # Open main tables in gramps database 676 db_maps = [ 677 ("family_map", FAMILY_TBL, db.DB_HASH), 678 ("place_map", PLACES_TBL, db.DB_HASH), 679 ("source_map", SOURCES_TBL, db.DB_HASH), 680 ("citation_map", CITATIONS_TBL, db.DB_HASH), 681 ("media_map", MEDIA_TBL, db.DB_HASH), 682 ("event_map", EVENTS_TBL, db.DB_HASH), 683 ("person_map", PERSON_TBL, db.DB_HASH), 684 ("repository_map", REPO_TBL, db.DB_HASH), 685 ("note_map", NOTE_TBL, db.DB_HASH), 686 ("tag_map", TAG_TBL, db.DB_HASH), 687 ("reference_map", REF_MAP, db.DB_BTREE), 688 ] 689 690 dbflags = DBFLAGS_R if self.readonly else DBFLAGS_O 691 for (dbmap, dbname, dbtype) in db_maps: 692 _db = self.__open_shelf(self.full_name, dbname, dbtype) 693 setattr(self, dbmap, _db) 694 695 if callback: 696 callback(37) 697 698 # Open name grouping database 699 self.name_group = self.__open_db(self.full_name, NAME_GROUP, 700 db.DB_HASH, db.DB_DUP) 701 702 # We have now successfully opened the database, so if the BSDDB version 703 # has changed, we update the DBSDB version file. 704 705 if self.update_env_version: 706 versionpath = os.path.join(name, BDBVERSFN) 707 with open(versionpath, "w") as version_file: 708 version = str(db.version()) 709 version_file.write(version) 710 _LOG.debug("Updated bsddb version file to %s" % str(db.version())) 711 712 if self.update_python_version: 713 versionpath = os.path.join(name, "pythonversion.txt") 714 version = str(version_info[0]) 715 _LOG.debug("Updated python version file to %s" % version) 716 with open(versionpath, "w") as version_file: 717 version_file.write(version) 718 719 # Here we take care of any changes in the tables related to new code. 720 # If secondary indices change, then they should removed 721 # or rebuilt by upgrade as well. In any case, the 722 # self.secondary_connected flag should be set accordingly. 723 if self.update_pickle_version: 724 from . import upgrade 725 UpdateCallback.__init__(self, callback) 726 upgrade.gramps_upgrade_pickle(self) 727 versionpath = os.path.join(name, str(PCKVERSFN)) 728 with open(versionpath, "w") as version_file: 729 version = "Yes" 730 version_file.write(version) 731 _LOG.debug("Updated pickle version file to %s" % str(version)) 732 733 self.__load_metadata() 734 735 if self._need_schema_upgrade(): 736 oldschema = self.metadata.get(b'version', default=0) 737 newschema = _DBVERSION 738 _LOG.debug("Schema upgrade required from %s to %s" % 739 (oldschema, newschema)) 740 if force_schema_upgrade == True: 741 self._gramps_upgrade(callback) 742 versionpath = os.path.join(name, str(SCHVERSFN)) 743 with open(versionpath, "w") as version_file: 744 version = str(_DBVERSION) 745 version_file.write(version) 746 _LOG.debug("Updated schema version file to %s" % str(version)) 747 else: 748 self.__close_early() 749 clear_lock_file(name) 750 raise DbUpgradeRequiredError(oldschema, newschema) 751 752 if callback: 753 callback(50) 754 755 # Connect secondary indices 756 if not self.secondary_connected: 757 self.__connect_secondary() 758 759 if callback: 760 callback(75) 761 762 # Open undo database 763 self.__open_undodb() 764 self.db_is_open = True 765 766 if callback: 767 callback(87) 768 769 self.abort_possible = True 770 return 1 771 772 def __open_undodb(self): 773 """ 774 Open the undo database 775 """ 776 if not self.readonly: 777 self.undolog = os.path.join(self.full_name, DBUNDOFN) 778 self.undodb = DbUndo(self, self.undolog) 779 self.undodb.open() 780 781 def __close_undodb(self): 782 if not self.readonly: 783 try: 784 self.undodb.close() 785 except db.DBNoSuchFileError: 786 pass 787 788 def get_undodb(self): 789 """ 790 Return the database that keeps track of Undo/Redo operations. 791 """ 792 return self.undodb 793 794 def __load_metadata(self): 795 # name display formats 796 self.name_formats = self.metadata.get(b'name_formats', default=[]) 797 # upgrade formats if they were saved in the old way 798 for format_ix in range(len(self.name_formats)): 799 format = self.name_formats[format_ix] 800 if len(format) == 3: 801 format = format + (True,) 802 self.name_formats[format_ix] = format 803 804 # database owner 805 try: 806 owner_data = self.metadata.get(b'researcher') 807 if owner_data: 808 if len(owner_data[0]) == 7: # Pre-3.3 format 809 owner_data = upgrade_researcher(owner_data) 810 self.owner.unserialize(owner_data) 811 except ImportError: #handle problems with pre-alpha 3.0 812 pass 813 814 # bookmarks 815 def meta(key): 816 return self.metadata.get(key, default=[]) 817 818 self.bookmarks.set(meta(b'bookmarks')) 819 self.family_bookmarks.set(meta(b'family_bookmarks')) 820 self.event_bookmarks.set(meta(b'event_bookmarks')) 821 self.source_bookmarks.set(meta(b'source_bookmarks')) 822 self.citation_bookmarks.set(meta(b'citation_bookmarks')) 823 self.repo_bookmarks.set(meta(b'repo_bookmarks')) 824 self.media_bookmarks.set(meta(b'media_bookmarks')) 825 self.place_bookmarks.set(meta(b'place_bookmarks')) 826 self.note_bookmarks.set(meta(b'note_bookmarks')) 827 828 # Custom type values 829 self.event_names = set(meta(b'event_names')) 830 self.family_attributes = set(meta(b'fattr_names')) 831 self.individual_attributes = set(meta(b'pattr_names')) 832 self.source_attributes = set(meta(b'sattr_names')) 833 self.marker_names = set(meta(b'marker_names')) 834 self.child_ref_types = set(meta(b'child_refs')) 835 self.family_rel_types = set(meta(b'family_rels')) 836 self.event_role_names = set(meta(b'event_roles')) 837 self.name_types = set(meta(b'name_types')) 838 self.origin_types = set(meta(b'origin_types')) 839 self.repository_types = set(meta(b'repo_types')) 840 self.note_types = set(meta(b'note_types')) 841 self.source_media_types = set(meta(b'sm_types')) 842 self.url_types = set(meta(b'url_types')) 843 self.media_attributes = set(meta(b'mattr_names')) 844 self.event_attributes = set(meta(b'eattr_names')) 845 self.place_types = set(meta(b'place_types')) 846 847 # surname list 848 self.surname_list = meta(b'surname_list') 849 850 def __connect_secondary(self): 851 """ 852 Connect or creates secondary index tables. 853 854 It assumes that the tables either exist and are in the right 855 format or do not exist (in which case they get created). 856 857 It is the responsibility of upgrade code to either create 858 or remove invalid secondary index tables. 859 """ 860 861 # index tables used just for speeding up searches 862 self.surnames = self.__open_db(self.full_name, SURNAMES, db.DB_BTREE, 863 db.DB_DUP | db.DB_DUPSORT) 864 865 db_maps = [ 866 ("id_trans", IDTRANS, db.DB_HASH, 0), 867 ("fid_trans", FIDTRANS, db.DB_HASH, 0), 868 ("eid_trans", EIDTRANS, db.DB_HASH, 0), 869 ("pid_trans", PIDTRANS, db.DB_HASH, 0), 870 ("sid_trans", SIDTRANS, db.DB_HASH, 0), 871 ("cid_trans", CIDTRANS, db.DB_HASH, 0), 872 ("oid_trans", OIDTRANS, db.DB_HASH, 0), 873 ("rid_trans", RIDTRANS, db.DB_HASH, 0), 874 ("nid_trans", NIDTRANS, db.DB_HASH, 0), 875 ("tag_trans", TAGTRANS, db.DB_HASH, 0), 876 ("parents", PPARENT, db.DB_HASH, 0), 877 ("reference_map_primary_map", REF_PRI, db.DB_BTREE, 0), 878 ("reference_map_referenced_map", REF_REF, db.DB_BTREE, db.DB_DUPSORT), 879 ] 880 881 for (dbmap, dbname, dbtype, dbflags) in db_maps: 882 _db = self.__open_db(self.full_name, dbname, dbtype, 883 db.DB_DUP | dbflags) 884 setattr(self, dbmap, _db) 885 886 if not self.readonly: 887 888 assoc = [ 889 (self.person_map, self.surnames, find_byte_surname), 890 (self.person_map, self.id_trans, find_idmap), 891 (self.family_map, self.fid_trans, find_idmap), 892 (self.event_map, self.eid_trans, find_idmap), 893 (self.place_map, self.pid_trans, find_idmap), 894 (self.place_map, self.parents, find_parent), 895 (self.source_map, self.sid_trans, find_idmap), 896 (self.citation_map, self.cid_trans, find_idmap), 897 (self.media_map, self.oid_trans, find_idmap), 898 (self.repository_map, self.rid_trans, find_idmap), 899 (self.note_map, self.nid_trans, find_idmap), 900 (self.tag_map, self.tag_trans, find_idmap), 901 (self.reference_map, self.reference_map_primary_map, 902 find_primary_handle), 903 (self.reference_map, self.reference_map_referenced_map, 904 find_referenced_handle), 905 ] 906 907 flags = DBFLAGS_R if self.readonly else DBFLAGS_O 908 for (dbmap, a_map, a_find) in assoc: 909 dbmap.associate(a_map, a_find, flags=flags) 910 911 self.secondary_connected = True 912 self.smap_index = len(self.source_map) 913 self.cmap_index = len(self.citation_map) 914 self.emap_index = len(self.event_map) 915 self.pmap_index = len(self.person_map) 916 self.fmap_index = len(self.family_map) 917 self.lmap_index = len(self.place_map) 918 self.omap_index = len(self.media_map) 919 self.rmap_index = len(self.repository_map) 920 self.nmap_index = len(self.note_map) 921 922 @catch_db_error 923 def rebuild_secondary(self, callback=None): 924 if self.readonly: 925 return 926 927 table_flags = DBFLAGS_O 928 929 # remove existing secondary indices 930 931 items = [ 932 ( self.id_trans, IDTRANS ), 933 ( self.surnames, SURNAMES ), 934 ( self.fid_trans, FIDTRANS ), 935 ( self.pid_trans, PIDTRANS ), 936 ( self.oid_trans, OIDTRANS ), 937 ( self.eid_trans, EIDTRANS ), 938 ( self.rid_trans, RIDTRANS ), 939 ( self.nid_trans, NIDTRANS ), 940 ( self.cid_trans, CIDTRANS ), 941 ( self.tag_trans, TAGTRANS ), 942 ( self.parents, PPARENT ), 943 ( self.reference_map_primary_map, REF_PRI), 944 ( self.reference_map_referenced_map, REF_REF), 945 ] 946 947 index = 1 948 for (database, name) in items: 949 database.close() 950 _db = db.DB(self.env) 951 try: 952 _db.remove(_mkname(self.full_name, name), name) 953 except db.DBNoSuchFileError: 954 pass 955 if callback: 956 callback(index) 957 index += 1 958 959 if callback: 960 callback(11) 961 962 # Set flag saying that we have removed secondary indices 963 # and then call the creating routine 964 self.secondary_connected = False 965 self.__connect_secondary() 966 if callback: 967 callback(12) 968 969 @catch_db_error 970 def find_place_child_handles(self, handle): 971 """ 972 Find all child places having the given place as the primary parent. 973 """ 974 parent_cur = self.get_place_parent_cursor() 975 976 try: 977 ret = parent_cur.set(handle.encode('utf-8')) 978 except: 979 ret = None 980 981 while (ret is not None): 982 (key, data) = ret 983 984 ### FIXME: this is a dirty hack that works without no 985 ### sensible explanation. For some reason, for a readonly 986 ### database, secondary index returns a primary table key 987 ### corresponding to the data, not the data. 988 if self.readonly: 989 data = self.place_map.get(data) 990 else: 991 data = pickle.loads(data) 992 993 yield data[0] 994 ret = parent_cur.next_dup() 995 996 parent_cur.close() 997 998 @catch_db_error 999 def find_backlink_handles(self, handle, include_classes=None): 1000 """ 1001 Find all objects that hold a reference to the object handle. 1002 1003 Returns an interator over a list of (class_name, handle) tuples. 1004 1005 :param handle: handle of the object to search for. 1006 :type handle: database handle 1007 :param include_classes: list of class names to include in the results. 1008 Default: None means include all classes. 1009 :type include_classes: list of class names 1010 1011 Note that this is a generator function, it returns a iterator for 1012 use in loops. If you want a list of the results use:: 1013 1014 result_list = list(find_backlink_handles(handle)) 1015 """ 1016 # Use the secondary index to locate all the reference_map entries 1017 # that include a reference to the object we are looking for. 1018 referenced_cur = self._get_reference_map_referenced_cursor() 1019 1020 try: 1021 ret = referenced_cur.set(handle.encode('utf-8')) 1022 except: 1023 ret = None 1024 1025 while (ret is not None): 1026 (key, data) = ret 1027 1028 # data values are of the form: 1029 # ((primary_object_class_name, primary_object_handle), 1030 # (referenced_object_class_name, referenced_object_handle)) 1031 # so we need the first tuple to give us the type to compare 1032 1033 ### FIXME: this is a dirty hack that works without no 1034 ### sensible explanation. For some reason, for a readonly 1035 ### database, secondary index returns a primary table key 1036 ### corresponding to the data, not the data. 1037 if self.readonly: 1038 data = self.reference_map.get(data) 1039 else: 1040 data = pickle.loads(data) 1041 1042 key, handle = data[0][:2] 1043 name = KEY_TO_CLASS_MAP[key] 1044 assert name == KEY_TO_CLASS_MAP[data[0][0]] 1045 assert handle == data[0][1] 1046 if (include_classes is None or 1047 name in include_classes): 1048 yield (name, handle) 1049 1050 ret = referenced_cur.next_dup() 1051 1052 referenced_cur.close() 1053 1054 def _delete_primary_from_reference_map(self, handle, transaction, txn=None): 1055 """ 1056 Remove all references to the primary object from the reference_map. 1057 handle should be utf-8 1058 """ 1059 primary_cur = self._get_reference_map_primary_cursor() 1060 1061 try: 1062 ret = primary_cur.set(handle) 1063 except: 1064 ret = None 1065 1066 remove_list = set() 1067 while (ret is not None): 1068 (key, data) = ret 1069 1070 # data values are of the form: 1071 # ((primary_object_class_name, primary_object_handle), 1072 # (referenced_object_class_name, referenced_object_handle)) 1073 1074 # so we need the second tuple give us a reference that we can 1075 # combine with the primary_handle to get the main key. 1076 main_key = (handle.decode('utf-8'), pickle.loads(data)[1][1]) 1077 1078 # The trick is not to remove while inside the cursor, 1079 # but collect them all and remove after the cursor is closed 1080 remove_list.add(main_key) 1081 1082 ret = primary_cur.next_dup() 1083 1084 primary_cur.close() 1085 1086 # Now that the cursor is closed, we can remove things 1087 for main_key in remove_list: 1088 self.__remove_reference(main_key, transaction, txn) 1089 1090 def _update_reference_map(self, obj, transaction, txn=None): 1091 """ 1092 If txn is given, then changes are written right away using txn. 1093 """ 1094 1095 # Add references to the reference_map for all primary object referenced 1096 # from the primary object 'obj' or any of its secondary objects. 1097 handle = obj.handle 1098 existing_references = set() 1099 primary_cur = self._get_reference_map_primary_cursor() 1100 key = handle.encode('utf-8') 1101 try: 1102 ret = primary_cur.set(key) 1103 except: 1104 ret = None 1105 1106 while (ret is not None): 1107 (key, data) = ret 1108 # data values are of the form: 1109 # ((primary_object_class_name, primary_object_handle), 1110 # (referenced_object_class_name, referenced_object_handle)) 1111 # so we need the second tuple give us a reference that we can 1112 # compare with what is returned from 1113 # get_referenced_handles_recursively 1114 1115 # secondary DBs are not DBShelf's, so we need to do pickling 1116 # and unpickling ourselves here 1117 existing_reference = pickle.loads(data)[1] 1118 existing_references.add((KEY_TO_CLASS_MAP[existing_reference[0]], 1119 existing_reference[1])) 1120 ret = primary_cur.next_dup() 1121 primary_cur.close() 1122 1123 # Once we have the list of rows that already have a reference 1124 # we need to compare it with the list of objects that are 1125 # still references from the primary object. 1126 current_references = set(obj.get_referenced_handles_recursively()) 1127 no_longer_required_references = existing_references.difference( 1128 current_references) 1129 new_references = current_references.difference(existing_references) 1130 1131 # handle addition of new references 1132 for (ref_class_name, ref_handle) in new_references: 1133 data = ((CLASS_TO_KEY_MAP[obj.__class__.__name__], handle), 1134 (CLASS_TO_KEY_MAP[ref_class_name], ref_handle),) 1135 self.__add_reference((handle, ref_handle), data, transaction, txn) 1136 1137 # handle deletion of old references 1138 for (ref_class_name, ref_handle) in no_longer_required_references: 1139 try: 1140 self.__remove_reference((handle, ref_handle), transaction, txn) 1141 except: 1142 # ignore missing old reference 1143 pass 1144 1145 def __remove_reference(self, key, transaction, txn): 1146 """ 1147 Remove the reference specified by the key, preserving the change in 1148 the passed transaction. 1149 """ 1150 if isinstance(key, tuple): 1151 #create a byte string key, first validity check in python 3! 1152 for val in key: 1153 if isinstance(val, bytes): 1154 raise DbError(_('An attempt is made to save a reference key ' 1155 'which is partly bytecode, this is not allowed.\n' 1156 'Key is %s') % str(key)) 1157 key = str(key) 1158 key = key.encode('utf-8') 1159 if not self.readonly: 1160 if not transaction.batch: 1161 old_data = self.reference_map.get(key, txn=txn) 1162 transaction.add(REFERENCE_KEY, TXNDEL, key, old_data, None) 1163 #transaction.reference_del.append(str(key)) 1164 self.reference_map.delete(key, txn=txn) 1165 1166 def __add_reference(self, key, data, transaction, txn): 1167 """ 1168 Add the reference specified by the key and the data, preserving the 1169 change in the passed transaction. 1170 """ 1171 if isinstance(key, tuple): 1172 #create a string key 1173 key = str(key) 1174 key = key.encode('utf-8') 1175 if self.readonly or not key: 1176 return 1177 1178 self.reference_map.put(key, data, txn=txn) 1179 if not transaction.batch: 1180 transaction.add(REFERENCE_KEY, TXNADD, key, None, data) 1181 #transaction.reference_add.append((str(key), data)) 1182 1183 @catch_db_error 1184 def reindex_reference_map(self, callback): 1185 """ 1186 Reindex all primary records in the database. 1187 1188 This will be a slow process for large databases. 1189 """ 1190 1191 # First, remove the reference map and related tables 1192 1193 db_maps = [ 1194 ("reference_map_referenced_map", REF_REF), 1195 ("reference_map_primary_map", REF_PRI), 1196 ("reference_map", REF_MAP), 1197 ] 1198 1199 for index, (dbmap, dbname) in enumerate(db_maps): 1200 getattr(self, dbmap).close() 1201 _db = db.DB(self.env) 1202 try: 1203 _db.remove(_mkname(self.full_name, dbname), dbname) 1204 except db.DBNoSuchFileError: 1205 pass 1206 callback(index+1) 1207 1208 # Open reference_map and primary map 1209 self.reference_map = self.__open_shelf(self.full_name, REF_MAP, 1210 dbtype=db.DB_BTREE) 1211 1212 self.reference_map_primary_map = self.__open_db(self.full_name, 1213 REF_PRI, db.DB_BTREE, db.DB_DUP) 1214 1215 self.reference_map.associate(self.reference_map_primary_map, 1216 find_primary_handle, DBFLAGS_O) 1217 1218 # Make a tuple of the functions and classes that we need for 1219 # each of the primary object tables. 1220 1221 with DbTxn(_("Rebuild reference map"), self, batch=True, 1222 no_magic=True) as transaction: 1223 callback(4) 1224 1225 primary_table = ( 1226 (self.get_person_cursor, Person), 1227 (self.get_family_cursor, Family), 1228 (self.get_event_cursor, Event), 1229 (self.get_place_cursor, Place), 1230 (self.get_source_cursor, Source), 1231 (self.get_citation_cursor, Citation), 1232 (self.get_media_cursor, Media), 1233 (self.get_repository_cursor, Repository), 1234 (self.get_note_cursor, Note), 1235 (self.get_tag_cursor, Tag), 1236 ) 1237 1238 # Now we use the functions and classes defined above 1239 # to loop through each of the primary object tables. 1240 1241 for cursor_func, class_func in primary_table: 1242 logging.info("Rebuilding %s reference map" % 1243 class_func.__name__) 1244 with cursor_func() as cursor: 1245 for found_handle, val in cursor: 1246 obj = class_func() 1247 obj.unserialize(val) 1248 with BSDDBTxn(self.env) as txn: 1249 self._update_reference_map(obj, 1250 transaction, txn.txn) 1251 1252 callback(5) 1253 1254 self.reference_map_referenced_map = self.__open_db(self.full_name, 1255 REF_REF, db.DB_BTREE, db.DB_DUP|db.DB_DUPSORT) 1256 1257 flags = DBFLAGS_R if self.readonly else DBFLAGS_O 1258 self.reference_map.associate(self.reference_map_referenced_map, 1259 find_referenced_handle, flags=flags) 1260 callback(6) 1261 1262 def __close_metadata(self): 1263 if not self.readonly: 1264 # Start transaction 1265 with BSDDBTxn(self.env, self.metadata) as txn: 1266 1267 # name display formats 1268 txn.put(b'name_formats', self.name_formats) 1269 1270 # database owner 1271 owner_data = self.owner.serialize() 1272 txn.put(b'researcher', owner_data) 1273 1274 # bookmarks 1275 txn.put(b'bookmarks', self.bookmarks.get()) 1276 txn.put(b'family_bookmarks', self.family_bookmarks.get()) 1277 txn.put(b'event_bookmarks', self.event_bookmarks.get()) 1278 txn.put(b'source_bookmarks', self.source_bookmarks.get()) 1279 txn.put(b'citation_bookmarks', self.citation_bookmarks.get()) 1280 txn.put(b'place_bookmarks', self.place_bookmarks.get()) 1281 txn.put(b'repo_bookmarks', self.repo_bookmarks.get()) 1282 txn.put(b'media_bookmarks', self.media_bookmarks.get()) 1283 txn.put(b'note_bookmarks', self.note_bookmarks.get()) 1284 1285 # gender stats 1286 txn.put(b'gender_stats', self.genderStats.save_stats()) 1287 1288 # Custom type values 1289 txn.put(b'event_names', list(self.event_names)) 1290 txn.put(b'fattr_names', list(self.family_attributes)) 1291 txn.put(b'pattr_names', list(self.individual_attributes)) 1292 txn.put(b'sattr_names', list(self.source_attributes)) 1293 txn.put(b'marker_names', list(self.marker_names)) 1294 txn.put(b'child_refs', list(self.child_ref_types)) 1295 txn.put(b'family_rels', list(self.family_rel_types)) 1296 txn.put(b'event_roles', list(self.event_role_names)) 1297 txn.put(b'name_types', list(self.name_types)) 1298 txn.put(b'origin_types', list(self.origin_types)) 1299 txn.put(b'repo_types', list(self.repository_types)) 1300 txn.put(b'note_types', list(self.note_types)) 1301 txn.put(b'sm_types', list(self.source_media_types)) 1302 txn.put(b'url_types', list(self.url_types)) 1303 txn.put(b'mattr_names', list(self.media_attributes)) 1304 txn.put(b'eattr_names', list(self.event_attributes)) 1305 txn.put(b'place_types', list(self.place_types)) 1306 1307 # name display formats 1308 txn.put(b'surname_list', self.surname_list) 1309 1310 self.metadata.close() 1311 1312 def __close_early(self): 1313 """ 1314 Bail out if the incompatible version is discovered: 1315 * close cleanly to not damage data/env 1316 """ 1317 if hasattr(self, 'metadata') and self.metadata: 1318 self.metadata.close() 1319 self.env.close() 1320 self.metadata = None 1321 self.env = None 1322 self.db_is_open = False 1323 1324 @catch_db_error 1325 def close(self, update=True, user=None): 1326 """ 1327 Close the database. 1328 if update is False, don't change access times, etc. 1329 """ 1330 if not self.db_is_open: 1331 return 1332 if self.txn: 1333 self.transaction_abort(self.transaction) 1334 if not self.readonly: 1335 self.env.txn_checkpoint() 1336 1337 self.__close_metadata() 1338 self.name_group.close() 1339 self.surnames.close() 1340 self.parents.close() 1341 self.id_trans.close() 1342 self.fid_trans.close() 1343 self.eid_trans.close() 1344 self.rid_trans.close() 1345 self.nid_trans.close() 1346 self.oid_trans.close() 1347 self.sid_trans.close() 1348 self.cid_trans.close() 1349 self.pid_trans.close() 1350 self.tag_trans.close() 1351 self.reference_map_primary_map.close() 1352 self.reference_map_referenced_map.close() 1353 self.reference_map.close() 1354 self.secondary_connected = False 1355 1356 # primary databases must be closed after secondary indexes, or 1357 # we run into problems with any active cursors. 1358 self.person_map.close() 1359 self.family_map.close() 1360 self.repository_map.close() 1361 self.note_map.close() 1362 self.place_map.close() 1363 self.source_map.close() 1364 self.citation_map.close() 1365 self.media_map.close() 1366 self.event_map.close() 1367 self.tag_map.close() 1368 self.env.close() 1369 self.__close_undodb() 1370 1371 self.person_map = None 1372 self.family_map = None 1373 self.repository_map = None 1374 self.note_map = None 1375 self.place_map = None 1376 self.source_map = None 1377 self.citation_map = None 1378 self.media_map = None 1379 self.event_map = None 1380 self.tag_map = None 1381 self.surnames = None 1382 self.env = None 1383 self.metadata = None 1384 self.db_is_open = False 1385 self.surname_list = None 1386 1387 DbBsddbRead.close(self) 1388 1389 self.person_map = None 1390 self.family_map = None 1391 self.repository_map = None 1392 self.note_map = None 1393 self.place_map = None 1394 self.source_map = None 1395 self.citation_map = None 1396 self.media_map = None 1397 self.event_map = None 1398 self.tag_map = None 1399 self.reference_map_primary_map = None 1400 self.reference_map_referenced_map = None 1401 self.reference_map = None 1402 self.undo_callback = None 1403 self.redo_callback = None 1404 self.undo_history_callback = None 1405 self.undodb = None 1406 1407 try: 1408 clear_lock_file(self.get_save_path()) 1409 except IOError: 1410 pass 1411 1412 def __add_object(self, obj, transaction, find_next_func, commit_func): 1413 if find_next_func and not obj.gramps_id: 1414 obj.gramps_id = find_next_func() 1415 if not obj.handle: 1416 obj.handle = create_id() 1417 commit_func(obj, transaction) 1418 return obj.handle 1419 1420 def add_person(self, person, transaction, set_gid=True): 1421 """ 1422 Add a Person to the database, assigning internal IDs if they have 1423 not already been defined. 1424 1425 If not set_gid, then gramps_id is not set. 1426 """ 1427 handle = self.__add_object(person, transaction, 1428 self.find_next_person_gramps_id if set_gid else None, 1429 self.commit_person) 1430 return handle 1431 1432 def add_family(self, family, transaction, set_gid=True): 1433 """ 1434 Add a Family to the database, assigning internal IDs if they have 1435 not already been defined. 1436 1437 If not set_gid, then gramps_id is not set. 1438 """ 1439 return self.__add_object(family, transaction, 1440 self.find_next_family_gramps_id if set_gid else None, 1441 self.commit_family) 1442 1443 def add_source(self, source, transaction, set_gid=True): 1444 """ 1445 Add a Source to the database, assigning internal IDs if they have 1446 not already been defined. 1447 1448 If not set_gid, then gramps_id is not set. 1449 """ 1450 return self.__add_object(source, transaction, 1451 self.find_next_source_gramps_id if set_gid else None, 1452 self.commit_source) 1453 1454 def add_citation(self, citation, transaction, set_gid=True): 1455 """ 1456 Add a Citation to the database, assigning internal IDs if they have 1457 not already been defined. 1458 1459 If not set_gid, then gramps_id is not set. 1460 """ 1461 return self.__add_object(citation, transaction, 1462 self.find_next_citation_gramps_id if set_gid else None, 1463 self.commit_citation) 1464 1465 def add_event(self, event, transaction, set_gid=True): 1466 """ 1467 Add an Event to the database, assigning internal IDs if they have 1468 not already been defined. 1469 1470 If not set_gid, then gramps_id is not set. 1471 """ 1472 if event.type.is_custom(): 1473 self.event_names.add(str(event.type)) 1474 return self.__add_object(event, transaction, 1475 self.find_next_event_gramps_id if set_gid else None, 1476 self.commit_event) 1477 1478 def add_place(self, place, transaction, set_gid=True): 1479 """ 1480 Add a Place to the database, assigning internal IDs if they have 1481 not already been defined. 1482 1483 If not set_gid, then gramps_id is not set. 1484 """ 1485 return self.__add_object(place, transaction, 1486 self.find_next_place_gramps_id if set_gid else None, 1487 self.commit_place) 1488 1489 def add_media(self, media, transaction, set_gid=True): 1490 """ 1491 Add a Media to the database, assigning internal IDs if they have 1492 not already been defined. 1493 1494 If not set_gid, then gramps_id is not set. 1495 """ 1496 return self.__add_object(media, transaction, 1497 self.find_next_media_gramps_id if set_gid else None, 1498 self.commit_media) 1499 1500 def add_repository(self, obj, transaction, set_gid=True): 1501 """ 1502 Add a Repository to the database, assigning internal IDs if they have 1503 not already been defined. 1504 1505 If not set_gid, then gramps_id is not set. 1506 """ 1507 return self.__add_object(obj, transaction, 1508 self.find_next_repository_gramps_id if set_gid else None, 1509 self.commit_repository) 1510 1511 def add_note(self, obj, transaction, set_gid=True): 1512 """ 1513 Add a Note to the database, assigning internal IDs if they have 1514 not already been defined. 1515 1516 If not set_gid, then gramps_id is not set. 1517 """ 1518 return self.__add_object(obj, transaction, 1519 self.find_next_note_gramps_id if set_gid else None, 1520 self.commit_note) 1521 1522 def add_tag(self, obj, transaction): 1523 """ 1524 Add a Tag to the database, assigning a handle if it has not already 1525 been defined. 1526 """ 1527 return self.__add_object(obj, transaction, None, self.commit_tag) 1528 1529 def __do_remove(self, handle, transaction, data_map, key): 1530 if self.readonly or not handle: 1531 return 1532 1533 handle = handle.encode('utf-8') 1534 if transaction.batch: 1535 with BSDDBTxn(self.env, data_map) as txn: 1536 self._delete_primary_from_reference_map(handle, transaction, 1537 txn=txn.txn) 1538 txn.delete(handle) 1539 else: 1540 self._delete_primary_from_reference_map(handle, transaction, 1541 txn=self.txn) 1542 old_data = data_map.get(handle, txn=self.txn) 1543 data_map.delete(handle, txn=self.txn) 1544 transaction.add(key, TXNDEL, handle, old_data, None) 1545 1546 def remove_person(self, handle, transaction): 1547 """ 1548 Remove the Person specified by the database handle from the database, 1549 preserving the change in the passed transaction. 1550 """ 1551 1552 if self.readonly or not handle: 1553 return 1554 person = self.get_person_from_handle(handle) 1555 self.genderStats.uncount_person (person) 1556 self.remove_from_surname_list(person) 1557 handle = handle.encode('utf-8') 1558 if transaction.batch: 1559 with BSDDBTxn(self.env, self.person_map) as txn: 1560 self._delete_primary_from_reference_map(handle, transaction, 1561 txn=txn.txn) 1562 txn.delete(handle) 1563 else: 1564 self._delete_primary_from_reference_map(handle, transaction, 1565 txn=self.txn) 1566 self.person_map.delete(handle, txn=self.txn) 1567 transaction.add(PERSON_KEY, TXNDEL, handle, person.serialize(), None) 1568 1569 def remove_source(self, handle, transaction): 1570 """ 1571 Remove the Source specified by the database handle from the 1572 database, preserving the change in the passed transaction. 1573 """ 1574 self.__do_remove(handle, transaction, self.source_map, 1575 SOURCE_KEY) 1576 1577 def remove_citation(self, handle, transaction): 1578 """ 1579 Remove the Citation specified by the database handle from the 1580 database, preserving the change in the passed transaction. 1581 """ 1582 self.__do_remove(handle, transaction, self.citation_map, 1583 CITATION_KEY) 1584 1585 def remove_event(self, handle, transaction): 1586 """ 1587 Remove the Event specified by the database handle from the 1588 database, preserving the change in the passed transaction. 1589 """ 1590 self.__do_remove(handle, transaction, self.event_map, 1591 EVENT_KEY) 1592 1593 def remove_media(self, handle, transaction): 1594 """ 1595 Remove the MediaPerson specified by the database handle from the 1596 database, preserving the change in the passed transaction. 1597 """ 1598 self.__do_remove(handle, transaction, self.media_map, 1599 MEDIA_KEY) 1600 1601 def remove_place(self, handle, transaction): 1602 """ 1603 Remove the Place specified by the database handle from the 1604 database, preserving the change in the passed transaction. 1605 """ 1606 self.__do_remove(handle, transaction, self.place_map, 1607 PLACE_KEY) 1608 1609 def remove_family(self, handle, transaction): 1610 """ 1611 Remove the Family specified by the database handle from the 1612 database, preserving the change in the passed transaction. 1613 """ 1614 self.__do_remove(handle, transaction, self.family_map, 1615 FAMILY_KEY) 1616 1617 def remove_repository(self, handle, transaction): 1618 """ 1619 Remove the Repository specified by the database handle from the 1620 database, preserving the change in the passed transaction. 1621 """ 1622 self.__do_remove(handle, transaction, self.repository_map, 1623 REPOSITORY_KEY) 1624 1625 def remove_note(self, handle, transaction): 1626 """ 1627 Remove the Note specified by the database handle from the 1628 database, preserving the change in the passed transaction. 1629 """ 1630 self.__do_remove(handle, transaction, self.note_map, 1631 NOTE_KEY) 1632 1633 def remove_tag(self, handle, transaction): 1634 """ 1635 Remove the Tag specified by the database handle from the 1636 database, preserving the change in the passed transaction. 1637 """ 1638 self.__do_remove(handle, transaction, self.tag_map, 1639 TAG_KEY) 1640 1641 @catch_db_error 1642 def set_name_group_mapping(self, name, group): 1643 if not self.readonly: 1644 # Start transaction 1645 with BSDDBTxn(self.env, self.name_group) as txn: 1646 sname = name.encode('utf-8') 1647 data = txn.get(sname) 1648 if data is not None: 1649 txn.delete(sname) 1650 if group is not None: 1651 txn.put(sname, group.encode('utf-8')) 1652 if group is None: 1653 grouppar = '' 1654 else: 1655 grouppar = group 1656 self.emit('person-groupname-rebuild', (name, grouppar)) 1657 1658 @catch_db_error 1659 def __build_surname_list(self): 1660 """ 1661 Build surname list for use in autocompletion 1662 This is a list of unicode objects, which are decoded from the utf-8 in 1663 bsddb 1664 """ 1665 self.surname_list = sorted( 1666 [s.decode('utf-8') for s in set(self.surnames.keys())], 1667 key=glocale.sort_key) 1668 1669 def add_to_surname_list(self, person, batch_transaction): 1670 """ 1671 Add surname to surname list 1672 """ 1673 if batch_transaction: 1674 return 1675 name = find_surname_name(person.handle, 1676 person.get_primary_name().serialize()) 1677 i = bisect.bisect(self.surname_list, name) 1678 if 0 < i <= len(self.surname_list): 1679 if self.surname_list[i-1] != name: 1680 self.surname_list.insert(i, name) 1681 else: 1682 self.surname_list.insert(i, name) 1683 1684 @catch_db_error 1685 def remove_from_surname_list(self, person): 1686 """ 1687 Check whether there are persons with the same surname left in 1688 the database. 1689 1690 If not then we need to remove the name from the list. 1691 The function must be overridden in the derived class. 1692 """ 1693 uname = find_surname_name(person.handle, 1694 person.get_primary_name().serialize()) 1695 name = uname.encode('utf-8') 1696 try: 1697 cursor = self.surnames.cursor(txn=self.txn) 1698 cursor_position = cursor.set(name) 1699 if cursor_position is not None and cursor.count() == 1: 1700 #surname list contains unicode objects 1701 i = bisect.bisect(self.surname_list, uname) 1702 if 0 <= i-1 < len(self.surname_list): 1703 del self.surname_list[i-1] 1704 except db.DBError as err: 1705 if str(err) == "(0, 'DB object has been closed')": 1706 pass # A batch transaction closes the surnames db table. 1707 else: 1708 raise 1709 finally: 1710 if 'cursor' in locals(): 1711 cursor.close() 1712 1713 def _commit_base(self, obj, data_map, key, transaction, change_time): 1714 """ 1715 Commit the specified object to the database, storing the changes as 1716 part of the transaction. 1717 """ 1718 if self.readonly or not obj or not obj.handle: 1719 return 1720 1721 obj.change = int(change_time or time.time()) 1722 handle = obj.handle 1723 handle = handle.encode('utf-8') 1724 1725 self._update_reference_map(obj, transaction, self.txn) 1726 1727 new_data = obj.serialize() 1728 old_data = None 1729 if not transaction.batch: 1730 old_data = data_map.get(handle, txn=self.txn) 1731 op = TXNUPD if old_data else TXNADD 1732 transaction.add(key, op, handle, old_data, new_data) 1733 data_map.put(handle, new_data, txn=self.txn) 1734 return old_data 1735 1736 def commit_person(self, person, transaction, change_time=None): 1737 """ 1738 Commit the specified Person to the database, storing the changes as 1739 part of the transaction. 1740 """ 1741 old_data = self._commit_base( 1742 person, self.person_map, PERSON_KEY, transaction, change_time) 1743 1744 if old_data: 1745 old_person = Person(old_data) 1746 1747 # Update gender statistics if necessary 1748 if (old_person.gender != person.gender or 1749 old_person.primary_name.first_name != 1750 person.primary_name.first_name): 1751 1752 self.genderStats.uncount_person(old_person) 1753 self.genderStats.count_person(person) 1754 1755 # Update surname list if necessary 1756 if (find_surname_name(old_person.handle, 1757 old_person.primary_name.serialize()) != 1758 find_surname_name(person.handle, 1759 person.primary_name.serialize())): 1760 self.remove_from_surname_list(old_person) 1761 self.add_to_surname_list(person, transaction.batch) 1762 else: 1763 self.genderStats.count_person(person) 1764 self.add_to_surname_list(person, transaction.batch) 1765 1766 self.individual_attributes.update( 1767 [str(attr.type) for attr in person.attribute_list 1768 if attr.type.is_custom() and str(attr.type)]) 1769 1770 self.event_role_names.update([str(eref.role) 1771 for eref in person.event_ref_list 1772 if eref.role.is_custom()]) 1773 1774 self.name_types.update([str(name.type) 1775 for name in ([person.primary_name] 1776 + person.alternate_names) 1777 if name.type.is_custom()]) 1778 all_surn = [] # new list we will use for storage 1779 all_surn += person.primary_name.get_surname_list() 1780 for asurname in person.alternate_names: 1781 all_surn += asurname.get_surname_list() 1782 self.origin_types.update([str(surn.origintype) for surn in all_surn 1783 if surn.origintype.is_custom()]) 1784 all_surn = None 1785 1786 self.url_types.update([str(url.type) for url in person.urls 1787 if url.type.is_custom()]) 1788 1789 attr_list = [] 1790 for mref in person.media_list: 1791 attr_list += [str(attr.type) for attr in mref.attribute_list 1792 if attr.type.is_custom() and str(attr.type)] 1793 self.media_attributes.update(attr_list) 1794 1795 def commit_media(self, obj, transaction, change_time=None): 1796 """ 1797 Commit the specified Media to the database, storing the changes 1798 as part of the transaction. 1799 """ 1800 self._commit_base(obj, self.media_map, MEDIA_KEY, 1801 transaction, change_time) 1802 1803 self.media_attributes.update( 1804 [str(attr.type) for attr in obj.attribute_list 1805 if attr.type.is_custom() and str(attr.type)]) 1806 1807 def commit_source(self, source, transaction, change_time=None): 1808 """ 1809 Commit the specified Source to the database, storing the changes as 1810 part of the transaction. 1811 """ 1812 self._commit_base(source, self.source_map, SOURCE_KEY, 1813 transaction, change_time) 1814 1815 self.source_media_types.update( 1816 [str(ref.media_type) for ref in source.reporef_list 1817 if ref.media_type.is_custom()]) 1818 1819 attr_list = [] 1820 for mref in source.media_list: 1821 attr_list += [str(attr.type) for attr in mref.attribute_list 1822 if attr.type.is_custom() and str(attr.type)] 1823 self.media_attributes.update(attr_list) 1824 1825 self.source_attributes.update( 1826 [str(attr.type) for attr in source.attribute_list 1827 if attr.type.is_custom() and str(attr.type)]) 1828 1829 def commit_citation(self, citation, transaction, change_time=None): 1830 """ 1831 Commit the specified Citation to the database, storing the changes as 1832 part of the transaction. 1833 """ 1834 self._commit_base(citation, self.citation_map, CITATION_KEY, 1835 transaction, change_time) 1836 1837 attr_list = [] 1838 for mref in citation.media_list: 1839 attr_list += [str(attr.type) for attr in mref.attribute_list 1840 if attr.type.is_custom() and str(attr.type)] 1841 self.media_attributes.update(attr_list) 1842 1843 self.source_attributes.update( 1844 [str(attr.type) for attr in citation.attribute_list 1845 if attr.type.is_custom() and str(attr.type)]) 1846 1847 def commit_place(self, place, transaction, change_time=None): 1848 """ 1849 Commit the specified Place to the database, storing the changes as 1850 part of the transaction. 1851 """ 1852 self._commit_base(place, self.place_map, PLACE_KEY, 1853 transaction, change_time) 1854 1855 if place.get_type().is_custom(): 1856 self.place_types.add(str(place.get_type())) 1857 1858 self.url_types.update([str(url.type) for url in place.urls 1859 if url.type.is_custom()]) 1860 1861 attr_list = [] 1862 for mref in place.media_list: 1863 attr_list += [str(attr.type) for attr in mref.attribute_list 1864 if attr.type.is_custom() and str(attr.type)] 1865 self.media_attributes.update(attr_list) 1866 1867 def commit_event(self, event, transaction, change_time=None): 1868 """ 1869 Commit the specified Event to the database, storing the changes as 1870 part of the transaction. 1871 """ 1872 self._commit_base(event, self.event_map, EVENT_KEY, 1873 transaction, change_time) 1874 1875 self.event_attributes.update( 1876 [str(attr.type) for attr in event.attribute_list 1877 if attr.type.is_custom() and str(attr.type)]) 1878 1879 if event.type.is_custom(): 1880 self.event_names.add(str(event.type)) 1881 1882 attr_list = [] 1883 for mref in event.media_list: 1884 attr_list += [str(attr.type) for attr in mref.attribute_list 1885 if attr.type.is_custom() and str(attr.type)] 1886 self.media_attributes.update(attr_list) 1887 1888 def commit_family(self, family, transaction, change_time=None): 1889 """ 1890 Commit the specified Family to the database, storing the changes as 1891 part of the transaction. 1892 """ 1893 self._commit_base(family, self.family_map, FAMILY_KEY, 1894 transaction, change_time) 1895 1896 self.family_attributes.update( 1897 [str(attr.type) for attr in family.attribute_list 1898 if attr.type.is_custom() and str(attr.type)]) 1899 1900 rel_list = [] 1901 for ref in family.child_ref_list: 1902 if ref.frel.is_custom(): 1903 rel_list.append(str(ref.frel)) 1904 if ref.mrel.is_custom(): 1905 rel_list.append(str(ref.mrel)) 1906 self.child_ref_types.update(rel_list) 1907 1908 self.event_role_names.update( 1909 [str(eref.role) for eref in family.event_ref_list 1910 if eref.role.is_custom()]) 1911 1912 if family.type.is_custom(): 1913 self.family_rel_types.add(str(family.type)) 1914 1915 attr_list = [] 1916 for mref in family.media_list: 1917 attr_list += [str(attr.type) for attr in mref.attribute_list 1918 if attr.type.is_custom() and str(attr.type)] 1919 self.media_attributes.update(attr_list) 1920 1921 def commit_repository(self, repository, transaction, change_time=None): 1922 """ 1923 Commit the specified Repository to the database, storing the changes 1924 as part of the transaction. 1925 """ 1926 self._commit_base(repository, self.repository_map, REPOSITORY_KEY, 1927 transaction, change_time) 1928 1929 if repository.type.is_custom(): 1930 self.repository_types.add(str(repository.type)) 1931 1932 self.url_types.update([str(url.type) for url in repository.urls 1933 if url.type.is_custom()]) 1934 1935 def commit_note(self, note, transaction, change_time=None): 1936 """ 1937 Commit the specified Note to the database, storing the changes as part 1938 of the transaction. 1939 """ 1940 self._commit_base(note, self.note_map, NOTE_KEY, 1941 transaction, change_time) 1942 1943 if note.type.is_custom(): 1944 self.note_types.add(str(note.type)) 1945 1946 def commit_tag(self, tag, transaction, change_time=None): 1947 """ 1948 Commit the specified Tag to the database, storing the changes as part 1949 of the transaction. 1950 """ 1951 self._commit_base(tag, self.tag_map, TAG_KEY, 1952 transaction, change_time) 1953 1954 def get_from_handle(self, handle, class_type, data_map): 1955 if handle is None: 1956 raise HandleError('Handle is None') 1957 if not handle: 1958 raise HandleError('Handle is empty') 1959 data = data_map.get(handle.encode('utf-8'), txn=self.txn) 1960 if data: 1961 newobj = class_type() 1962 newobj.unserialize(data) 1963 return newobj 1964 raise HandleError('Handle %s not found' % handle) 1965 1966 @catch_db_error 1967 def transaction_begin(self, transaction): 1968 """ 1969 Prepare the database for the start of a new Transaction. 1970 1971 Supported transaction parameters: 1972 1973 no_magic 1974 Boolean, defaults to False, indicating if secondary indices should be 1975 disconnected. 1976 """ 1977 _LOG.debug(" %s%sDbBsddb %s transaction begin for '%s'" 1978 % ("Magic " if not getattr(transaction, 'no_magic', False) 1979 else "", 1980 "Batch " if transaction.batch else "", 1981 hex(id(self)), 1982 transaction.get_description())) 1983 if self.txn is not None: 1984 msg = self.transaction.get_description() 1985 self.transaction_abort(self.transaction) 1986 raise DbError(_('A second transaction is started while there' 1987 ' is still a transaction, "%s", active in the database.') % msg) 1988 1989 if not isinstance(transaction, DbTxn) or len(transaction) != 0: 1990 raise TypeError("transaction_begin must be called with an empty " 1991 "instance of DbTxn which typically happens by using the " 1992 "DbTxn instance as a context manager.") 1993 1994 self.transaction = transaction 1995 if transaction.batch: 1996 # A batch transaction does not store the commits 1997 # Aborting the session completely will become impossible. 1998 self.abort_possible = False 1999 # Undo is also impossible after batch transaction 2000 self.undodb.clear() 2001 self.env.txn_checkpoint() 2002 2003 if (self.secondary_connected and 2004 not getattr(transaction, 'no_magic', False)): 2005 # Disconnect unneeded secondary indices 2006 self.surnames.close() 2007 _db = db.DB(self.env) 2008 try: 2009 _db.remove(_mkname(self.full_name, SURNAMES), SURNAMES) 2010 except db.DBNoSuchFileError: 2011 pass 2012 2013 self.reference_map_referenced_map.close() 2014 _db = db.DB(self.env) 2015 try: 2016 _db.remove(_mkname(self.full_name, REF_REF), REF_REF) 2017 except db.DBNoSuchFileError: 2018 pass 2019 else: 2020 self.bsddbtxn = BSDDBTxn(self.env) 2021 self.txn = self.bsddbtxn.begin() 2022 return transaction 2023 2024 @catch_db_error 2025 def transaction_commit(self, transaction): 2026 """ 2027 Make the changes to the database final and add the content of the 2028 transaction to the undo database. 2029 """ 2030 msg = transaction.get_description() 2031 if self._LOG_ALL: 2032 _LOG.debug("%s: Transaction commit '%s'\n" 2033 % (self.__class__.__name__, msg)) 2034 2035 if self.readonly: 2036 return 2037 2038 if self.txn is not None: 2039 assert msg != '' 2040 self.bsddbtxn.commit() 2041 self.bsddbtxn = None 2042 self.txn = None 2043 self.env.log_flush() 2044 if not transaction.batch: 2045 # do deletes and adds first 2046 for trans_type in [TXNDEL, TXNADD, TXNUPD]: 2047 for obj_type in range(11): 2048 if obj_type != REFERENCE_KEY: 2049 self.__emit(transaction, obj_type, trans_type) 2050 self.transaction = None 2051 transaction.clear() 2052 self.undodb.commit(transaction, msg) 2053 self.__after_commit(transaction) 2054 self.has_changed = True 2055 _LOG.debug(" %s%sDbBsddb %s transaction commit for '%s'" 2056 % ("Magic " if not getattr(transaction, 'no_magic', False) 2057 else "", 2058 "Batch " if transaction.batch else "", 2059 hex(id(self)), 2060 transaction.get_description())) 2061 2062 def __emit(self, transaction, obj_type, trans_type): 2063 """ 2064 Define helper function to do the actual emits 2065 """ 2066 if (obj_type, trans_type) in transaction: 2067 if trans_type == TXNDEL: 2068 handles = [handle.decode('utf-8') for handle, data in 2069 transaction[(obj_type, trans_type)]] 2070 else: 2071 handles = [handle.decode('utf-8') for handle, data in 2072 transaction[(obj_type, trans_type)] 2073 if (handle, None) not in transaction[(obj_type, 2074 TXNDEL)]] 2075 if handles: 2076 self.emit(KEY_TO_NAME_MAP[obj_type] + 2077 ['-add', '-update', '-delete'][trans_type], 2078 (handles, )) 2079 2080 def transaction_abort(self, transaction): 2081 """ 2082 Revert the changes made to the database so far during the transaction. 2083 """ 2084 if self._LOG_ALL: 2085 _LOG.debug("%s: Transaction abort '%s'\n" % 2086 (self.__class__.__name__, transaction.get_description())) 2087 2088 if self.readonly: 2089 return 2090 2091 if self.txn is not None: 2092 self.bsddbtxn.abort() 2093 self.bsddbtxn = None 2094 self.txn = None 2095 if not transaction.batch: 2096 # It can occur that the listview is already updated because of 2097 # the "model-treeview automatic update" combined with a 2098 # "while Gtk.events_pending(): Gtk.main_iteration() loop" 2099 # (typically used in a progress bar), so emit rebuild signals 2100 # to correct that. 2101 object_types = set([x[0] for x in list(transaction.keys())]) 2102 for object_type in object_types: 2103 if object_type == REFERENCE_KEY: 2104 continue 2105 self.emit('%s-rebuild' % KEY_TO_NAME_MAP[object_type], ()) 2106 self.transaction = None 2107 transaction.clear() 2108 transaction.first = None 2109 transaction.last = None 2110 self.__after_commit(transaction) 2111 2112 def __after_commit(self, transaction): 2113 """ 2114 Post-transaction commit processing 2115 """ 2116 if transaction.batch: 2117 self.env.txn_checkpoint() 2118 2119 if not getattr(transaction, 'no_magic', False): 2120 # create new secondary indices to replace the ones removed 2121 2122 self.surnames = self.__open_db(self.full_name, SURNAMES, 2123 db.DB_BTREE, db.DB_DUP | db.DB_DUPSORT) 2124 2125 self.person_map.associate(self.surnames, find_byte_surname, 2126 DBFLAGS_O) 2127 2128 self.reference_map_referenced_map = self.__open_db(self.full_name, 2129 REF_REF, db.DB_BTREE, db.DB_DUP|db.DB_DUPSORT) 2130 2131 self.reference_map.associate(self.reference_map_referenced_map, 2132 find_referenced_handle, DBFLAGS_O) 2133 2134 # Only build surname list after surname index is surely back 2135 self.__build_surname_list() 2136 2137 # Reset callbacks if necessary 2138 if transaction.batch or not len(transaction): 2139 return 2140 if self.undo_callback: 2141 self.undo_callback(_("_Undo %s") % transaction.get_description()) 2142 if self.redo_callback: 2143 self.redo_callback(None) 2144 if self.undo_history_callback: 2145 self.undo_history_callback() 2146 2147 def undo(self, update_history=True): 2148 return self.undodb.undo(update_history) 2149 2150 def redo(self, update_history=True): 2151 return self.undodb.redo(update_history) 2152 2153 def _gramps_upgrade(self, callback=None): 2154 UpdateCallback.__init__(self, callback) 2155 2156 version = self.metadata.get(b'version', default=_MINVERSION) 2157 2158 t = time.time() 2159 2160 from . import upgrade 2161 2162 if version < 14: 2163 upgrade.gramps_upgrade_14(self) 2164 if version < 15: 2165 upgrade.gramps_upgrade_15(self) 2166 if version < 16: 2167 upgrade.gramps_upgrade_16(self) 2168 if version < 17: 2169 upgrade.gramps_upgrade_17(self) 2170 if version < 18: 2171 upgrade.gramps_upgrade_18(self) 2172 if version < 19: 2173 upgrade.gramps_upgrade_19(self) 2174 2175 self.reset() 2176 self.set_total(6) 2177 self.__connect_secondary() 2178 self.rebuild_secondary() 2179 # Open undo database 2180 self.__open_undodb() 2181 self.db_is_open = True 2182 self.reindex_reference_map(self.update) 2183 self.reset() 2184 # Close undo database 2185 self.__close_undodb() 2186 self.db_is_open = False 2187 2188 2189 _LOG.debug("Upgrade time: %d seconds" % int(time.time()-t)) 2190 2191 def _set_auto_remove(self): 2192 """ 2193 BSDDB change log settings using new method with renamed attributes 2194 """ 2195 autoremove_flag = None 2196 autoremove_method = None 2197 for flag in ["DB_LOG_AUTO_REMOVE", "DB_LOG_AUTOREMOVE"]: 2198 if hasattr(db, flag): 2199 autoremove_flag = getattr(db, flag) 2200 break 2201 for method in ["log_set_config", "set_flags"]: 2202 if hasattr(self.env, method): 2203 autoremove_method = getattr(self.env, method) 2204 break 2205 if autoremove_method and autoremove_flag: 2206 autoremove_method(autoremove_flag, 1) 2207 else: 2208 _LOG.debug("Failed to set autoremove flag") 2209 2210 def _write_version(self, name): 2211 """Write version number for a newly created DB.""" 2212 full_name = os.path.abspath(name) 2213 2214 self.env = db.DBEnv() 2215 self.env.set_cachesize(0, DBCACHE) 2216 2217 # These env settings are only needed for Txn environment 2218 self.env.set_lk_max_locks(DBLOCKS) 2219 self.env.set_lk_max_objects(DBOBJECTS) 2220 2221 # clean up unused logs 2222 self._set_auto_remove() 2223 2224 # The DB_PRIVATE flag must go if we ever move to multi-user setup 2225 env_flags = db.DB_CREATE | db.DB_PRIVATE |\ 2226 db.DB_INIT_MPOOL |\ 2227 db.DB_INIT_LOG | db.DB_INIT_TXN 2228 2229 # As opposed to before, we always try recovery on databases 2230 env_flags |= db.DB_RECOVER 2231 2232 # Environment name is now based on the filename 2233 env_name = name 2234 2235 self.env.open(env_name, env_flags) 2236 self.env.txn_checkpoint() 2237 2238 self.metadata = self.__open_shelf(full_name, META) 2239 2240 _LOG.debug("Write schema version %s" % _DBVERSION) 2241 with BSDDBTxn(self.env, self.metadata) as txn: 2242 txn.put(b'version', _DBVERSION) 2243 2244 versionpath = os.path.join(name, BDBVERSFN) 2245 version = str(db.version()) 2246 _LOG.debug("Write bsddb version %s" % version) 2247 with open(versionpath, "w") as version_file: 2248 version_file.write(version) 2249 2250 versionpath = os.path.join(name, "pythonversion.txt") 2251 version = str(version_info[0]) 2252 _LOG.debug("Write python version file to %s" % version) 2253 with open(versionpath, "w") as version_file: 2254 version_file.write(version) 2255 2256 versionpath = os.path.join(name, str(PCKVERSFN)) 2257 _LOG.debug("Write pickle version file to %s" % "Yes") 2258 with open(versionpath, "w") as version_file: 2259 version = "Yes" 2260 version_file.write(version) 2261 2262 versionpath = os.path.join(name, str(SCHVERSFN)) 2263 _LOG.debug("Write schema version file to %s" % str(_DBVERSION)) 2264 with open(versionpath, "w") as version_file: 2265 version = str(_DBVERSION) 2266 version_file.write(version) 2267 2268 self.metadata.close() 2269 self.env.close() 2270 2271 def get_dbid(self): 2272 """ 2273 In BSDDB, we use the file directory name as the unique ID for 2274 this database on this computer. 2275 """ 2276 return self.brief_name 2277 2278 def get_summary(self): 2279 """ 2280 Returns dictionary of summary item. 2281 Should include, if possible: 2282 2283 _("Number of people") 2284 _("Version") 2285 _("Schema version") 2286 """ 2287 schema_version = self.metadata.get(b'version', default=None) 2288 bdbversion_file = os.path.join(self.path, BDBVERSFN) 2289 if os.path.isfile(bdbversion_file): 2290 with open(bdbversion_file) as vers_file: 2291 bsddb_version = vers_file.readline().strip() 2292 bsddb_version = ".".join([str(v) for v in safe_eval(bsddb_version)]) 2293 else: 2294 bsddb_version = _("Unknown") 2295 return { 2296 _("Number of people"): self.get_number_of_people(), 2297 _("Number of families"): self.get_number_of_families(), 2298 _("Number of sources"): self.get_number_of_sources(), 2299 _("Number of citations"): self.get_number_of_citations(), 2300 _("Number of events"): self.get_number_of_events(), 2301 _("Number of media"): self.get_number_of_media(), 2302 _("Number of places"): self.get_number_of_places(), 2303 _("Number of repositories"): self.get_number_of_repositories(), 2304 _("Number of notes"): self.get_number_of_notes(), 2305 _("Number of tags"): self.get_number_of_tags(), 2306 _("Schema version"): schema_version, 2307 _("Database version"): bsddb_version, 2308 } 2309 2310def _mkname(path, name): 2311 return os.path.join(path, name + DBEXT) 2312 2313def upgrade_researcher(owner_data): 2314 """ 2315 Upgrade researcher data to include a locality field in the address. 2316 This should be called for databases prior to Gramps 3.3. 2317 """ 2318 addr = tuple([owner_data[0][0], ''] + list(owner_data[0][1:])) 2319 return (addr, owner_data[1], owner_data[2], owner_data[3]) 2320 2321