import collections
import copy
import json
import logging
import os
import sys
import time
from queue import Queue
from subprocess import CalledProcessError

import colorama
import pkg_resources

from borgmatic.borg import borg as borg_borg
from borgmatic.borg import check as borg_check
from borgmatic.borg import create as borg_create
from borgmatic.borg import environment as borg_environment
from borgmatic.borg import export_tar as borg_export_tar
from borgmatic.borg import extract as borg_extract
from borgmatic.borg import info as borg_info
from borgmatic.borg import init as borg_init
from borgmatic.borg import list as borg_list
from borgmatic.borg import mount as borg_mount
from borgmatic.borg import prune as borg_prune
from borgmatic.borg import umount as borg_umount
from borgmatic.commands.arguments import parse_arguments
from borgmatic.config import checks, collect, convert, validate
from borgmatic.hooks import command, dispatch, dump, monitor
from borgmatic.logger import configure_logging, should_do_markup
from borgmatic.signals import configure_signals
from borgmatic.verbosity import verbosity_to_log_level

logger = logging.getLogger(__name__)

LEGACY_CONFIG_PATH = '/usr/local/etc/borgmatic/config'


def run_configuration(config_filename, config, arguments):
    '''
    Given a config filename, the corresponding parsed config dict, and command-line arguments as a
    dict from subparser name to a namespace of parsed arguments, execute its defined pruning,
    backups, consistency checks, and/or other actions.

    Yield a combination of:

      * JSON output strings from successfully executing any actions that produce JSON
      * logging.LogRecord instances containing errors from any actions or backup hooks that fail
    '''
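    # Pull out each top-level config section, defaulting to an empty dict when a section is absent.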
    (location, storage, retention, consistency, hooks) = (
        config.get(section_name, {})
        for section_name in ('location', 'storage', 'retention', 'consistency', 'hooks')
    )
    global_arguments = arguments['global']

    local_path = location.get('local_path', 'borg')
    remote_path = location.get('remote_path')
    retries = storage.get('retries', 0)
    retry_wait = storage.get('retry_wait', 0)
    borg_environment.initialize(storage)
    encountered_error = None
    error_repository = ''
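    # Monitoring hooks only fire when at least one of the prune, create, or check actions runs.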
    prune_create_or_check = {'prune', 'create', 'check'}.intersection(arguments)
    monitoring_log_level = verbosity_to_log_level(global_arguments.monitoring_verbosity)

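    # Run pre-action command hooks and start any configured monitoring hooks.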
    try:
        if prune_create_or_check:
            dispatch.call_hooks(
                'initialize_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitoring_log_level,
                global_arguments.dry_run,
            )
        if 'prune' in arguments:
            command.execute_hook(
                hooks.get('before_prune'),
                hooks.get('umask'),
                config_filename,
                'pre-prune',
                global_arguments.dry_run,
            )
        if 'create' in arguments:
            command.execute_hook(
                hooks.get('before_backup'),
                hooks.get('umask'),
                config_filename,
                'pre-backup',
                global_arguments.dry_run,
            )
        if 'check' in arguments:
            command.execute_hook(
                hooks.get('before_check'),
                hooks.get('umask'),
                config_filename,
                'pre-check',
                global_arguments.dry_run,
            )
        if 'extract' in arguments:
            command.execute_hook(
                hooks.get('before_extract'),
                hooks.get('umask'),
                config_filename,
                'pre-extract',
                global_arguments.dry_run,
            )
        if prune_create_or_check:
            dispatch.call_hooks(
                'ping_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitor.State.START,
                monitoring_log_level,
                global_arguments.dry_run,
            )
    except (OSError, CalledProcessError) as error:
        if command.considered_soft_failure(config_filename, error):
            return

        encountered_error = error
        yield from make_error_log_records(
            '{}: Error running pre hook'.format(config_filename), error
        )

    if not encountered_error:
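        # Queue up each configured repository; failed ones get re-queued below for retries.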
        repo_queue = Queue()
        for repo in location['repositories']:
            repo_queue.put((repo, 0))

        while not repo_queue.empty():
            repository_path, retry_num = repo_queue.get()
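            # Back off linearly: each retry waits retry_num * retry_wait seconds before running.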
            timeout = retry_num * retry_wait
            if timeout:
                logger.warning(f'{config_filename}: Sleeping {timeout}s before next retry')
                time.sleep(timeout)
            try:
                yield from run_actions(
                    arguments=arguments,
                    location=location,
                    storage=storage,
                    retention=retention,
                    consistency=consistency,
                    hooks=hooks,
                    local_path=local_path,
                    remote_path=remote_path,
                    repository_path=repository_path,
                )
            except (OSError, CalledProcessError, ValueError) as error:
                yield from make_error_log_records(
                    '{}: Error running actions for repository'.format(repository_path), error
                )
                if retry_num < retries:
                    repo_queue.put((repository_path, retry_num + 1))
                    logger.warning(
                        f'{config_filename}: Retrying... attempt {retry_num + 1}/{retries}'
                    )
                    continue
                encountered_error = error
                error_repository = repository_path

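    # If nothing failed, run post-action hooks and signal monitoring hooks that the run finished.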
    if not encountered_error:
        try:
            if 'prune' in arguments:
                command.execute_hook(
                    hooks.get('after_prune'),
                    hooks.get('umask'),
                    config_filename,
                    'post-prune',
                    global_arguments.dry_run,
                )
            if 'create' in arguments:
                dispatch.call_hooks(
                    'remove_database_dumps',
                    hooks,
                    config_filename,
                    dump.DATABASE_HOOK_NAMES,
                    location,
                    global_arguments.dry_run,
                )
                command.execute_hook(
                    hooks.get('after_backup'),
                    hooks.get('umask'),
                    config_filename,
                    'post-backup',
                    global_arguments.dry_run,
                )
            if 'check' in arguments:
                command.execute_hook(
                    hooks.get('after_check'),
                    hooks.get('umask'),
                    config_filename,
                    'post-check',
                    global_arguments.dry_run,
                )
            if 'extract' in arguments:
                command.execute_hook(
                    hooks.get('after_extract'),
                    hooks.get('umask'),
                    config_filename,
                    'post-extract',
                    global_arguments.dry_run,
                )
            if prune_create_or_check:
                dispatch.call_hooks(
                    'ping_monitor',
                    hooks,
                    config_filename,
                    monitor.MONITOR_HOOK_NAMES,
                    monitor.State.FINISH,
                    monitoring_log_level,
                    global_arguments.dry_run,
                )
                dispatch.call_hooks(
                    'destroy_monitor',
                    hooks,
                    config_filename,
                    monitor.MONITOR_HOOK_NAMES,
                    monitoring_log_level,
                    global_arguments.dry_run,
                )
        except (OSError, CalledProcessError) as error:
            if command.considered_soft_failure(config_filename, error):
                return

            encountered_error = error
            yield from make_error_log_records(
                '{}: Error running post hook'.format(config_filename), error
            )

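    # If a monitored action failed, run the on-error hook and report the failure to monitoring hooks.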
    if encountered_error and prune_create_or_check:
        try:
            command.execute_hook(
                hooks.get('on_error'),
                hooks.get('umask'),
                config_filename,
                'on-error',
                global_arguments.dry_run,
                repository=error_repository,
                error=encountered_error,
                output=getattr(encountered_error, 'output', ''),
            )
            dispatch.call_hooks(
                'ping_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitor.State.FAIL,
                monitoring_log_level,
                global_arguments.dry_run,
            )
            dispatch.call_hooks(
                'destroy_monitor',
                hooks,
                config_filename,
                monitor.MONITOR_HOOK_NAMES,
                monitoring_log_level,
                global_arguments.dry_run,
            )
        except (OSError, CalledProcessError) as error:
            if command.considered_soft_failure(config_filename, error):
                return

            yield from make_error_log_records(
                '{}: Error running on-error hook'.format(config_filename), error
            )


def run_actions(
    *,
    arguments,
    location,
    storage,
    retention,
    consistency,
    hooks,
    local_path,
    remote_path,
    repository_path,
):  # pragma: no cover
    '''
    Given command-line arguments as a dict from subparser name to a namespace of parsed arguments,
    several different configuration dicts, local and remote paths to Borg, and a repository path,
    run all actions from the command-line arguments on the given repository.

    Yield JSON output strings from executing any actions that produce JSON.

    Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an
    action. Raise ValueError if the arguments or configuration passed to an action are invalid.
    '''
    repository = os.path.expanduser(repository_path)
    global_arguments = arguments['global']
    dry_run_label = ' (dry run; not making any changes)' if global_arguments.dry_run else ''
    if 'init' in arguments:
        logger.info('{}: Initializing repository'.format(repository))
        borg_init.initialize_repository(
            repository,
            storage,
            arguments['init'].encryption_mode,
            arguments['init'].append_only,
            arguments['init'].storage_quota,
            local_path=local_path,
            remote_path=remote_path,
        )
    if 'prune' in arguments:
        logger.info('{}: Pruning archives{}'.format(repository, dry_run_label))
        borg_prune.prune_archives(
            global_arguments.dry_run,
            repository,
            storage,
            retention,
            local_path=local_path,
            remote_path=remote_path,
            stats=arguments['prune'].stats,
            files=arguments['prune'].files,
        )
    if 'create' in arguments:
        logger.info('{}: Creating archive{}'.format(repository, dry_run_label))
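        # Clear stale database dumps, then dump each configured database for inclusion in the archive.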
        dispatch.call_hooks(
            'remove_database_dumps',
            hooks,
            repository,
            dump.DATABASE_HOOK_NAMES,
            location,
            global_arguments.dry_run,
        )
        active_dumps = dispatch.call_hooks(
            'dump_databases',
            hooks,
            repository,
            dump.DATABASE_HOOK_NAMES,
            location,
            global_arguments.dry_run,
        )
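        # Flatten the per-hook lists of dump processes so create_archive() can stream from all of them.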
        stream_processes = [process for processes in active_dumps.values() for process in processes]

        json_output = borg_create.create_archive(
            global_arguments.dry_run,
            repository,
            location,
            storage,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['create'].progress,
            stats=arguments['create'].stats,
            json=arguments['create'].json,
            files=arguments['create'].files,
            stream_processes=stream_processes,
        )
        if json_output:
            yield json.loads(json_output)

    if 'check' in arguments and checks.repository_enabled_for_checks(repository, consistency):
        logger.info('{}: Running consistency checks'.format(repository))
        borg_check.check_archives(
            repository,
            storage,
            consistency,
            local_path=local_path,
            remote_path=remote_path,
            progress=arguments['check'].progress,
            repair=arguments['check'].repair,
            only_checks=arguments['check'].only,
        )
    if 'extract' in arguments:
        if arguments['extract'].repository is None or validate.repositories_match(
            repository, arguments['extract'].repository
        ):
            logger.info(
                '{}: Extracting archive {}'.format(repository, arguments['extract'].archive)
            )
            borg_extract.extract_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['extract'].archive, storage, local_path, remote_path
                ),
                arguments['extract'].paths,
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path=arguments['extract'].destination,
                strip_components=arguments['extract'].strip_components,
                progress=arguments['extract'].progress,
            )
    if 'export-tar' in arguments:
        if arguments['export-tar'].repository is None or validate.repositories_match(
            repository, arguments['export-tar'].repository
        ):
            logger.info(
                '{}: Exporting archive {} as tar file'.format(
                    repository, arguments['export-tar'].archive
                )
            )
            borg_export_tar.export_tar_archive(
                global_arguments.dry_run,
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['export-tar'].archive, storage, local_path, remote_path
                ),
                arguments['export-tar'].paths,
                arguments['export-tar'].destination,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                tar_filter=arguments['export-tar'].tar_filter,
                files=arguments['export-tar'].files,
                strip_components=arguments['export-tar'].strip_components,
            )
    if 'mount' in arguments:
        if arguments['mount'].repository is None or validate.repositories_match(
            repository, arguments['mount'].repository
        ):
            if arguments['mount'].archive:
                logger.info(
                    '{}: Mounting archive {}'.format(repository, arguments['mount'].archive)
                )
            else:
                logger.info('{}: Mounting repository'.format(repository))

            borg_mount.mount_archive(
                repository,
                borg_list.resolve_archive_name(
                    repository, arguments['mount'].archive, storage, local_path, remote_path
                ),
                arguments['mount'].mount_point,
                arguments['mount'].paths,
                arguments['mount'].foreground,
                arguments['mount'].options,
                storage,
                local_path=local_path,
                remote_path=remote_path,
            )
    if 'restore' in arguments:
        if arguments['restore'].repository is None or validate.repositories_match(
            repository, arguments['restore'].repository
        ):
            logger.info(
                '{}: Restoring databases from archive {}'.format(
                    repository, arguments['restore'].archive
                )
            )
            dispatch.call_hooks(
                'remove_database_dumps',
                hooks,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )

            restore_names = arguments['restore'].databases or []
            if 'all' in restore_names:
                restore_names = []

            archive_name = borg_list.resolve_archive_name(
                repository, arguments['restore'].archive, storage, local_path, remote_path
            )
            found_names = set()

            for hook_name, per_hook_restore_databases in hooks.items():
                if hook_name not in dump.DATABASE_HOOK_NAMES:
                    continue

                for restore_database in per_hook_restore_databases:
                    database_name = restore_database['name']
                    if restore_names and database_name not in restore_names:
                        continue

                    found_names.add(database_name)
                    dump_pattern = dispatch.call_hooks(
                        'make_database_dump_pattern',
                        hooks,
                        repository,
                        dump.DATABASE_HOOK_NAMES,
                        location,
                        database_name,
                    )[hook_name]

                    # Kick off a single database extract to stdout.
                    extract_process = borg_extract.extract_archive(
                        dry_run=global_arguments.dry_run,
                        repository=repository,
                        archive=archive_name,
                        paths=dump.convert_glob_patterns_to_borg_patterns([dump_pattern]),
                        location_config=location,
                        storage_config=storage,
                        local_path=local_path,
                        remote_path=remote_path,
                        destination_path='/',
                        # A directory format dump isn't a single file, and therefore can't extract
                        # to stdout. In this case, the extract_process return value is None.
                        extract_to_stdout=bool(restore_database.get('format') != 'directory'),
                    )

                    # Run a single database restore, consuming the extract stdout (if any).
                    dispatch.call_hooks(
                        'restore_database_dump',
                        {hook_name: [restore_database]},
                        repository,
                        dump.DATABASE_HOOK_NAMES,
                        location,
                        global_arguments.dry_run,
                        extract_process,
                    )

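            # With the restores done, clean up the temporary dump files extracted from the archive.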
            dispatch.call_hooks(
                'remove_database_dumps',
                hooks,
                repository,
                dump.DATABASE_HOOK_NAMES,
                location,
                global_arguments.dry_run,
            )

            if not restore_names and not found_names:
                raise ValueError('No databases were found to restore')

            missing_names = sorted(set(restore_names) - found_names)
            if missing_names:
                raise ValueError(
                    'Cannot restore database(s) {} missing from borgmatic\'s configuration'.format(
                        ', '.join(missing_names)
                    )
                )

    if 'list' in arguments:
        if arguments['list'].repository is None or validate.repositories_match(
            repository, arguments['list'].repository
        ):
            list_arguments = copy.copy(arguments['list'])
            if not list_arguments.json:
                logger.warning('{}: Listing archives'.format(repository))
            list_arguments.archive = borg_list.resolve_archive_name(
                repository, list_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_list.list_archives(
                repository,
                storage,
                list_arguments=list_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
    if 'info' in arguments:
        if arguments['info'].repository is None or validate.repositories_match(
            repository, arguments['info'].repository
        ):
            info_arguments = copy.copy(arguments['info'])
            if not info_arguments.json:
                logger.warning('{}: Displaying summary info for archives'.format(repository))
            info_arguments.archive = borg_list.resolve_archive_name(
                repository, info_arguments.archive, storage, local_path, remote_path
            )
            json_output = borg_info.display_archives_info(
                repository,
                storage,
                info_arguments=info_arguments,
                local_path=local_path,
                remote_path=remote_path,
            )
            if json_output:
                yield json.loads(json_output)
    if 'borg' in arguments:
        if arguments['borg'].repository is None or validate.repositories_match(
            repository, arguments['borg'].repository
        ):
            logger.warning('{}: Running arbitrary Borg command'.format(repository))
            archive_name = borg_list.resolve_archive_name(
                repository, arguments['borg'].archive, storage, local_path, remote_path
            )
            borg_borg.run_arbitrary_borg(
                repository,
                storage,
                options=arguments['borg'].options,
                archive=archive_name,
                local_path=local_path,
                remote_path=remote_path,
            )


def load_configurations(config_filenames, overrides=None):
    '''
    Given a sequence of configuration filenames, load and validate each configuration file. Return
    the results as a tuple of: dict of configuration filename to corresponding parsed configuration,
    and sequence of logging.LogRecord instances containing any parse errors.
    '''
    # Dict mapping from config filename to corresponding parsed config dict.
    configs = collections.OrderedDict()
    logs = []

    # Parse and load each configuration file.
    for config_filename in config_filenames:
        try:
            configs[config_filename] = validate.parse_configuration(
                config_filename, validate.schema_filename(), overrides
            )
        except (ValueError, OSError, validate.Validation_error) as error:
            logs.extend(
                [
                    logging.makeLogRecord(
                        dict(
                            levelno=logging.CRITICAL,
                            levelname='CRITICAL',
                            msg='{}: Error parsing configuration file'.format(config_filename),
                        )
                    ),
                    logging.makeLogRecord(
                        dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
                    ),
                ]
            )

    return (configs, logs)


def log_record(suppress_log=False, **kwargs):
    '''
    Create a log record based on the given makeLogRecord() arguments, one of which must be
    named "levelno". Log the record (unless suppress_log is set) and return it.
    '''
    record = logging.makeLogRecord(kwargs)
    if suppress_log:
        return record

    logger.handle(record)
    return record


def make_error_log_records(message, error=None):
    '''
    Given error message text and an optional exception object, yield a series of logging.LogRecord
    instances with error summary information. As a side effect, log each record.
    '''
    if not error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        return

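    # Re-raise the given error purely so the except clauses below can dispatch on its type.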
    try:
        raise error
    except CalledProcessError as error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        if error.output:
            # Suppress these logs for now and save full error output for the log summary at the end.
            yield log_record(
                levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output, suppress_log=True
            )
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
    except (ValueError, OSError) as error:
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        yield log_record(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error)
    except:  # noqa: E722
        # Raising above only as a means of determining the error type. Swallow the exception here
        # because we don't want the exception to propagate out of this function.
        pass


def get_local_path(configs):
    '''
    Arbitrarily return the local path from the first configuration dict. Default to "borg" if not
    set.
    '''
    return next(iter(configs.values())).get('location', {}).get('local_path', 'borg')


def collect_configuration_run_summary_logs(configs, arguments):
    '''
    Given a dict of configuration filename to corresponding parsed configuration, and parsed
    command-line arguments as a dict from subparser name to a parsed namespace of arguments, run
    each configuration file and yield a series of logging.LogRecord instances containing summary
    information about each run.

    As a side effect of running through these configuration files, output their JSON results, if
    any, to stdout.
    '''
    # Run cross-file validation checks.
    if 'extract' in arguments:
        repository = arguments['extract'].repository
    elif 'list' in arguments and arguments['list'].archive:
        repository = arguments['list'].repository
    elif 'mount' in arguments:
        repository = arguments['mount'].repository
    else:
        repository = None

    if repository:
        try:
            validate.guard_configuration_contains_repository(repository, configs)
        except ValueError as error:
            yield from make_error_log_records(str(error))
            return

    if not configs:
        yield from make_error_log_records(
            '{}: No valid configuration files found'.format(
                ' '.join(arguments['global'].config_paths)
            )
        )
        return

    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('before_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'pre-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running pre-everything hook', error)
            return

    # Execute the actions corresponding to each configuration file.
    json_results = []
    for config_filename, config in configs.items():
        results = list(run_configuration(config_filename, config, arguments))
        error_logs = tuple(result for result in results if isinstance(result, logging.LogRecord))

        if error_logs:
            yield from make_error_log_records(
                '{}: Error running configuration file'.format(config_filename)
            )
            yield from error_logs
        else:
            yield logging.makeLogRecord(
                dict(
                    levelno=logging.INFO,
                    levelname='INFO',
                    msg='{}: Successfully ran configuration file'.format(config_filename),
                )
            )
            if results:
                json_results.extend(results)

    if 'umount' in arguments:
        logger.info('Unmounting mount point {}'.format(arguments['umount'].mount_point))
        try:
            borg_umount.unmount_archive(
                mount_point=arguments['umount'].mount_point, local_path=get_local_path(configs)
            )
        except (CalledProcessError, OSError) as error:
            yield from make_error_log_records('Error unmounting mount point', error)

    if json_results:
        sys.stdout.write(json.dumps(json_results))

    if 'create' in arguments:
        try:
            for config_filename, config in configs.items():
                hooks = config.get('hooks', {})
                command.execute_hook(
                    hooks.get('after_everything'),
                    hooks.get('umask'),
                    config_filename,
                    'post-everything',
                    arguments['global'].dry_run,
                )
        except (CalledProcessError, ValueError, OSError) as error:
            yield from make_error_log_records('Error running post-everything hook', error)


def exit_with_help_link():  # pragma: no cover
    '''
    Display a link to get help and exit with an error code.
    '''
    logger.critical('')
    logger.critical('Need some help? https://torsion.org/borgmatic/#issues')
    sys.exit(1)


def main():  # pragma: no cover
    configure_signals()

    try:
        arguments = parse_arguments(*sys.argv[1:])
    except ValueError as error:
        configure_logging(logging.CRITICAL)
        logger.critical(error)
        exit_with_help_link()
    except SystemExit as error:
        if error.code == 0:
            raise error
        configure_logging(logging.CRITICAL)
        logger.critical('Error parsing arguments: {}'.format(' '.join(sys.argv)))
        exit_with_help_link()

    global_arguments = arguments['global']
    if global_arguments.version:
        print(pkg_resources.require('borgmatic')[0].version)
        sys.exit(0)

    config_filenames = tuple(collect.collect_config_filenames(global_arguments.config_paths))
    configs, parse_logs = load_configurations(config_filenames, global_arguments.overrides)

    any_json_flags = any(
        getattr(sub_arguments, 'json', False) for sub_arguments in arguments.values()
    )
    colorama.init(
        autoreset=True,
        strip=not should_do_markup(global_arguments.no_color or any_json_flags, configs),
    )
    try:
        configure_logging(
            verbosity_to_log_level(global_arguments.verbosity),
            verbosity_to_log_level(global_arguments.syslog_verbosity),
            verbosity_to_log_level(global_arguments.log_file_verbosity),
            verbosity_to_log_level(global_arguments.monitoring_verbosity),
            global_arguments.log_file,
        )
    except (FileNotFoundError, PermissionError) as error:
        configure_logging(logging.CRITICAL)
        logger.critical('Error configuring logging: {}'.format(error))
        exit_with_help_link()

    logger.debug('Ensuring legacy configuration is upgraded')
    convert.guard_configuration_upgraded(LEGACY_CONFIG_PATH, config_filenames)

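    # Combine any parse errors with the per-configuration run logs, and replay them as a summary.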
    summary_logs = parse_logs + list(collect_configuration_run_summary_logs(configs, arguments))
    summary_logs_max_level = max(log.levelno for log in summary_logs)

    for message in ('', 'summary:'):
        log_record(
            levelno=summary_logs_max_level,
            levelname=logging.getLevelName(summary_logs_max_level),
            msg=message,
        )

    for log in summary_logs:
        logger.handle(log)

    if summary_logs_max_level >= logging.CRITICAL:
        exit_with_help_link()