1# Copyright 2005 Duke University
2# Copyright (C) 2012-2018 Red Hat, Inc.
3#
4# This program is free software; you can redistribute it and/or modify
5# it under the terms of the GNU General Public License as published by
6# the Free Software Foundation; either version 2 of the License, or
7# (at your option) any later version.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12# GNU Library General Public License for more details.
13#
14# You should have received a copy of the GNU General Public License
15# along with this program; if not, write to the Free Software
16# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17
18"""
19Supplies the Base class.
20"""
21
22from __future__ import absolute_import
23from __future__ import division
24from __future__ import print_function
25from __future__ import unicode_literals
26
27import argparse
28import dnf
29import libdnf.transaction
30
31from copy import deepcopy
32from dnf.comps import CompsQuery
33from dnf.i18n import _, P_, ucd
34from dnf.util import _parse_specs
35from dnf.db.history import SwdbInterface
36from dnf.yum import misc
37try:
38    from collections.abc import Sequence
39except ImportError:
40    from collections import Sequence
41import datetime
42import dnf.callback
43import dnf.comps
44import dnf.conf
45import dnf.conf.read
46import dnf.crypto
47import dnf.dnssec
48import dnf.drpm
49import dnf.exceptions
50import dnf.goal
51import dnf.history
52import dnf.lock
53import dnf.logging
54# WITH_MODULES is used by ansible (lib/ansible/modules/packaging/os/dnf.py)
55try:
56    import dnf.module.module_base
57    WITH_MODULES = True
58except ImportError:
59    WITH_MODULES = False
60import dnf.persistor
61import dnf.plugin
62import dnf.query
63import dnf.repo
64import dnf.repodict
65import dnf.rpm.connection
66import dnf.rpm.miscutils
67import dnf.rpm.transaction
68import dnf.sack
69import dnf.selector
70import dnf.subject
71import dnf.transaction
72import dnf.util
73import dnf.yum.rpmtrans
74import functools
75import hawkey
76import itertools
77import logging
78import math
79import os
80import operator
81import re
82import rpm
83import time
84import shutil
85
86
87logger = logging.getLogger("dnf")
88
89
90class Base(object):
91
92    def __init__(self, conf=None):
93        # :api
94        self._closed = False
95        self._conf = conf or self._setup_default_conf()
96        self._goal = None
97        self._repo_persistor = None
98        self._sack = None
99        self._transaction = None
100        self._priv_ts = None
101        self._comps = None
102        self._comps_trans = dnf.comps.TransactionBunch()
103        self._history = None
104        self._tempfiles = set()
105        self._trans_tempfiles = set()
106        self._ds_callback = dnf.callback.Depsolve()
107        self._logging = dnf.logging.Logging()
108        self._repos = dnf.repodict.RepoDict()
109        self._rpm_probfilter = set([rpm.RPMPROB_FILTER_OLDPACKAGE])
110        self._plugins = dnf.plugin.Plugins()
111        self._trans_success = False
112        self._trans_install_set = False
113        self._tempfile_persistor = None
114        #  self._update_security_filters is used by ansible
115        self._update_security_filters = []
116        self._update_security_options = {}
117        self._allow_erasing = False
118        self._repo_set_imported_gpg_keys = set()
119        self.output = None
120
    def __enter__(self):
        """Context-manager entry; the Base itself is the managed resource."""
        return self
123
    def __exit__(self, *exc_args):
        """Context-manager exit; releases all handles via close()."""
        self.close()
126
    def __del__(self):
        """Best-effort cleanup when the object is garbage collected."""
        self.close()
129
130    def _add_tempfiles(self, files):
131        if self._transaction:
132            self._trans_tempfiles.update(files)
133        elif self.conf.destdir:
134            pass
135        else:
136            self._tempfiles.update(files)
137
138    def _add_repo_to_sack(self, repo):
139        repo.load()
140        mdload_flags = dict(load_filelists=True,
141                            load_presto=repo.deltarpm,
142                            load_updateinfo=True)
143        if repo.load_metadata_other:
144            mdload_flags["load_other"] = True
145        try:
146            self._sack.load_repo(repo._repo, build_cache=True, **mdload_flags)
147        except hawkey.Exception as e:
148            logger.debug(_("loading repo '{}' failure: {}").format(repo.id, e))
149            raise dnf.exceptions.RepoError(
150                _("Loading repository '{}' has failed").format(repo.id))
151
152    @staticmethod
153    def _setup_default_conf():
154        conf = dnf.conf.Conf()
155        subst = conf.substitutions
156        if 'releasever' not in subst:
157            subst['releasever'] = \
158                dnf.rpm.detect_releasever(conf.installroot)
159        return conf
160
161    def _setup_modular_excludes(self):
162        hot_fix_repos = [i.id for i in self.repos.iter_enabled() if i.module_hotfixes]
163        try:
164            solver_errors = self.sack.filter_modules(
165                self._moduleContainer, hot_fix_repos, self.conf.installroot,
166                self.conf.module_platform_id, update_only=False, debugsolver=self.conf.debug_solver,
167                module_obsoletes=self.conf.module_obsoletes)
168        except hawkey.Exception as e:
169            raise dnf.exceptions.Error(ucd(e))
170        if solver_errors:
171            logger.warning(
172                dnf.module.module_base.format_modular_solver_errors(solver_errors[0]))
173
    def _setup_excludes_includes(self, only_main=False):
        """Apply includepkgs/excludepkgs settings to the sack.

        Repo-specific includes/excludes are evaluated first, then the global
        (main) ones, and finally modular filtering is applied.

        :param only_main: when True, skip the repo-specific settings and
            only apply the global configuration
        """
        disabled = set(self.conf.disable_excludes)
        # 'all' disables every exclude; modular filtering still applies
        if 'all' in disabled and WITH_MODULES:
            self._setup_modular_excludes()
            return
        repo_includes = []
        repo_excludes = []
        # first evaluate repo specific includes/excludes
        if not only_main:
            for r in self.repos.iter_enabled():
                if r.id in disabled:
                    continue
                if len(r.includepkgs) > 0:
                    incl_query = self.sack.query().filterm(empty=True)
                    for incl in set(r.includepkgs):
                        subj = dnf.subject.Subject(incl)
                        incl_query = incl_query.union(subj.get_best_query(
                            self.sack, with_nevra=True, with_provides=False, with_filenames=False))
                    # repo includes only apply to packages from that repo
                    incl_query.filterm(reponame=r.id)
                    repo_includes.append((incl_query.apply(), r.id))
                excl_query = self.sack.query().filterm(empty=True)
                for excl in set(r.excludepkgs):
                    subj = dnf.subject.Subject(excl)
                    excl_query = excl_query.union(subj.get_best_query(
                        self.sack, with_nevra=True, with_provides=False, with_filenames=False))
                excl_query.filterm(reponame=r.id)
                if excl_query:
                    repo_excludes.append((excl_query, r.id))

        # then main (global) includes/excludes because they can mask
        # repo specific settings
        if 'main' not in disabled:
            include_query = self.sack.query().filterm(empty=True)
            if len(self.conf.includepkgs) > 0:
                for incl in set(self.conf.includepkgs):
                    subj = dnf.subject.Subject(incl)
                    include_query = include_query.union(subj.get_best_query(
                        self.sack, with_nevra=True, with_provides=False, with_filenames=False))
            exclude_query = self.sack.query().filterm(empty=True)
            for excl in set(self.conf.excludepkgs):
                subj = dnf.subject.Subject(excl)
                exclude_query = exclude_query.union(subj.get_best_query(
                    self.sack, with_nevra=True, with_provides=False, with_filenames=False))
            if len(self.conf.includepkgs) > 0:
                self.sack.add_includes(include_query)
                self.sack.set_use_includes(True)
            if exclude_query:
                self.sack.add_excludes(exclude_query)

        if repo_includes:
            for query, repoid in repo_includes:
                self.sack.add_includes(query)
                self.sack.set_use_includes(True, repoid)

        if repo_excludes:
            for query, repoid in repo_excludes:
                self.sack.add_excludes(query)

        if not only_main and WITH_MODULES:
            self._setup_modular_excludes()
234
235    def _store_persistent_data(self):
236        if self._repo_persistor and not self.conf.cacheonly:
237            expired = [r.id for r in self.repos.iter_enabled()
238                       if (r.metadata and r._repo.isExpired())]
239            self._repo_persistor.expired_to_add.update(expired)
240            self._repo_persistor.save()
241
242        if self._tempfile_persistor:
243            self._tempfile_persistor.save()
244
    @property
    def comps(self):
        # :api
        """Comps (groups) metadata, lazily loaded on first access."""
        if self._comps is None:
            self.read_comps(arch_filter=True)
        return self._comps
251
    @property
    def conf(self):
        # :api
        """The active dnf.conf.Conf configuration."""
        return self._conf
256
    @property
    def repos(self):
        # :api
        """The RepoDict of configured repositories."""
        return self._repos
261
    @repos.deleter
    def repos(self):
        # :api
        """Drop the repository dict entirely (set to None, not emptied)."""
        self._repos = None
266
    @property
    @dnf.util.lazyattr("_priv_rpmconn")
    def _rpmconn(self):
        """RPM connection for the installroot; created once and cached."""
        return dnf.rpm.connection.RpmConnection(self.conf.installroot)
271
    @property
    def sack(self):
        # :api
        """The package Sack, or None until fill_sack() has run."""
        return self._sack
276
277    @property
278    def _moduleContainer(self):
279        if self.sack is None:
280            raise dnf.exceptions.Error("Sack was not initialized")
281        if self.sack._moduleContainer is None:
282            self.sack._moduleContainer = libdnf.module.ModulePackageContainer(
283                False, self.conf.installroot, self.conf.substitutions["arch"], self.conf.persistdir)
284        return self.sack._moduleContainer
285
    @property
    def transaction(self):
        # :api
        """The resolved transaction, or None before one is set."""
        return self._transaction
290
    @transaction.setter
    def transaction(self, value):
        # :api
        """Set the transaction; it may only be assigned once per resolve."""
        if self._transaction:
            raise ValueError('transaction already set')
        self._transaction = value
297
    def _activate_persistor(self):
        """Create the repo persistor under the configured cachedir."""
        self._repo_persistor = dnf.persistor.RepoPersistor(self.conf.cachedir)
300
    def init_plugins(self, disabled_glob=(), enable_plugins=(), cli=None):
        # :api
        """Load plugins and run their __init__().

        Loading is skipped entirely when conf.plugins is False; the init
        hook still runs for anything already loaded.
        """
        if self.conf.plugins:
            self._plugins._load(self.conf, disabled_glob, enable_plugins)
        self._plugins._run_init(self, cli)
307
    def pre_configure_plugins(self):
        # :api
        """Run plugins pre_configure() method."""
        self._plugins._run_pre_config()
312
    def configure_plugins(self):
        # :api
        """Run plugins configure() method."""
        self._plugins._run_config()
317
    def update_cache(self, timer=False):
        # :api
        """Download and cache metadata for all enabled repositories.

        :param timer: when True, behave as the periodic makecache timer:
            bail out on metered connections / battery power, honor
            conf.metadata_timer_sync, and limit each repo to one mirror try
        :returns: True when the metadata cache was (re)created, False when
            caching was skipped
        """
        period = self.conf.metadata_timer_sync
        if self._repo_persistor is None:
            self._activate_persistor()
        persistor = self._repo_persistor
        if timer:
            if dnf.util.on_metered_connection():
                msg = _('Metadata timer caching disabled '
                        'when running on metered connection.')
                logger.info(msg)
                return False
            if dnf.util.on_ac_power() is False:
                msg = _('Metadata timer caching disabled '
                        'when running on a battery.')
                logger.info(msg)
                return False
            if period <= 0:
                msg = _('Metadata timer caching disabled.')
                logger.info(msg)
                return False
            since_last_makecache = persistor.since_last_makecache()
            if since_last_makecache is not None and since_last_makecache < period:
                logger.info(_('Metadata cache refreshed recently.'))
                return False
            # timer runs should not retry alternate mirrors
            for repo in self.repos.values():
                repo._repo.setMaxMirrorTries(1)

        if not self.repos._any_enabled():
            logger.info(_('There are no enabled repositories in "{}".').format(
                '", "'.join(self.conf.reposdir)))
            return False

        # expire repos that need it so fill_sack() re-downloads them
        for r in self.repos.iter_enabled():
            (is_cache, expires_in) = r._metadata_expire_in()
            if expires_in is None:
                logger.info(_('%s: will never be expired and will not be refreshed.'), r.id)
            elif not is_cache or expires_in <= 0:
                logger.debug(_('%s: has expired and will be refreshed.'), r.id)
                r._repo.expire()
            elif timer and expires_in < period:
                # expires within the checking period:
                msg = _("%s: metadata will expire after %d seconds and will be refreshed now")
                logger.debug(msg, r.id, expires_in)
                r._repo.expire()
            else:
                logger.debug(_('%s: will expire after %d seconds.'), r.id,
                             expires_in)

        if timer:
            persistor.reset_last_makecache = True
        self.fill_sack(load_system_repo=False, load_available_repos=True)  # performs the md sync
        logger.info(_('Metadata cache created.'))
        return True
373
    def fill_sack(self, load_system_repo=True, load_available_repos=True):
        # :api
        """Prepare the Sack and the Goal objects.

        :param load_system_repo: load the rpmdb (@System) repo; the special
            value 'auto' tolerates an IOError while loading it
        :param load_available_repos: load all enabled repositories; a repo
            failing with skip_if_unavailable set is disabled and reported,
            otherwise the RepoError propagates
        :returns: the newly built sack
        """
        timer = dnf.logging.Timer('sack setup')
        self.reset(sack=True, goal=True)
        self._sack = dnf.sack._build_sack(self)
        lock = dnf.lock.build_metadata_lock(self.conf.cachedir, self.conf.exit_on_lock)
        with lock:
            if load_system_repo is not False:
                try:
                    # FIXME: If build_cache=True, @System.solv is incorrectly updated in install-
                    # remove loops
                    self._sack.load_system_repo(build_cache=False)
                except IOError:
                    if load_system_repo != 'auto':
                        raise
            if load_available_repos:
                error_repos = []
                mts = 0
                age = time.time()
                # Iterate over installed GPG keys and check their validity using DNSSEC
                if self.conf.gpgkey_dns_verification:
                    dnf.dnssec.RpmImportedKeys.check_imported_keys_validity()
                for r in self.repos.iter_enabled():
                    try:
                        self._add_repo_to_sack(r)
                        # track newest metadata timestamp and youngest age
                        if r._repo.getTimestamp() > mts:
                            mts = r._repo.getTimestamp()
                        if r._repo.getAge() < age:
                            age = r._repo.getAge()
                        logger.debug(_("%s: using metadata from %s."), r.id,
                                     dnf.util.normalize_time(
                                         r._repo.getMaxTimestamp()))
                    except dnf.exceptions.RepoError as e:
                        r._repo.expire()
                        if r.skip_if_unavailable is False:
                            raise
                        logger.warning("Error: %s", e)
                        error_repos.append(r.id)
                        r.disable()
                if error_repos:
                    logger.warning(
                        _("Ignoring repositories: %s"), ', '.join(error_repos))
                if self.repos._any_enabled():
                    if age != 0 and mts != 0:
                        logger.info(_("Last metadata expiration check: %s ago on %s."),
                                    datetime.timedelta(seconds=int(age)),
                                    dnf.util.normalize_time(mts))
            else:
                self.repos.all().disable()
        conf = self.conf
        self._sack._configure(conf.installonlypkgs, conf.installonly_limit, conf.allow_vendor_change)
        self._setup_excludes_includes()
        timer()
        self._goal = dnf.goal.Goal(self._sack)
        self._goal.protect_running_kernel = conf.protect_running_kernel
        self._plugins.run_sack()
        return self._sack
432
    def fill_sack_from_repos_in_cache(self, load_system_repo=True):
        # :api
        """
        Prepare Sack and Goal objects and also load all enabled repositories from cache only,
        it doesn't download anything and it doesn't check if metadata are expired.
        If there is not enough metadata present (repomd.xml or both primary.xml and solv file
        are missing) the given repo is either skipped or it throws a RepoError exception
        depending on the skip_if_unavailable configuration.

        :param load_system_repo: load the rpmdb (@System) repo; the special
            value 'auto' tolerates an IOError while loading it
        :returns: the newly built sack
        """
        timer = dnf.logging.Timer('sack setup')
        self.reset(sack=True, goal=True)
        self._sack = dnf.sack._build_sack(self)
        lock = dnf.lock.build_metadata_lock(self.conf.cachedir, self.conf.exit_on_lock)
        with lock:
            if load_system_repo is not False:
                try:
                    # FIXME: If build_cache=True, @System.solv is incorrectly updated in install-
                    # remove loops
                    self._sack.load_system_repo(build_cache=False)
                except IOError:
                    if load_system_repo != 'auto':
                        raise

            error_repos = []
            # Iterate over installed GPG keys and check their validity using DNSSEC
            if self.conf.gpgkey_dns_verification:
                dnf.dnssec.RpmImportedKeys.check_imported_keys_validity()
            for repo in self.repos.iter_enabled():
                try:
                    # cache-only load; missing pieces raise instead of syncing
                    repo._repo.loadCache(throwExcept=True, ignoreMissing=True)
                    mdload_flags = dict(load_filelists=True,
                                        load_presto=repo.deltarpm,
                                        load_updateinfo=True)
                    if repo.load_metadata_other:
                        mdload_flags["load_other"] = True

                    self._sack.load_repo(repo._repo, **mdload_flags)

                    logger.debug(_("%s: using metadata from %s."), repo.id,
                                 dnf.util.normalize_time(
                                     repo._repo.getMaxTimestamp()))
                except (RuntimeError, hawkey.Exception) as e:
                    if repo.skip_if_unavailable is False:
                        raise dnf.exceptions.RepoError(
                            _("loading repo '{}' failure: {}").format(repo.id, e))
                    else:
                        logger.debug(_("loading repo '{}' failure: {}").format(repo.id, e))
                    error_repos.append(repo.id)
                    repo.disable()
            if error_repos:
                logger.warning(
                    _("Ignoring repositories: %s"), ', '.join(error_repos))

        conf = self.conf
        self._sack._configure(conf.installonlypkgs, conf.installonly_limit, conf.allow_vendor_change)
        self._setup_excludes_includes()
        timer()
        self._goal = dnf.goal.Goal(self._sack)
        self._goal.protect_running_kernel = conf.protect_running_kernel
        self._plugins.run_sack()
        return self._sack
494
    def _finalize_base(self):
        """Flush persistors, clean cached packages and close the history.

        Called from close(); decides whether downloaded packages are removed
        now or kept in the cache until the next successful transaction.
        """
        self._tempfile_persistor = dnf.persistor.TempfilePersistor(
            self.conf.cachedir)

        if not self.conf.keepcache:
            self._clean_packages(self._tempfiles)
            if self._trans_success:
                self._trans_tempfiles.update(
                    self._tempfile_persistor.get_saved_tempfiles())
                self._tempfile_persistor.empty()
                if self._trans_install_set:
                    self._clean_packages(self._trans_tempfiles)
            else:
                # failed transaction: keep its downloads for a later retry
                self._tempfile_persistor.tempfiles_to_add.update(
                    self._trans_tempfiles)

        if self._tempfile_persistor.tempfiles_to_add:
            logger.info(_("The downloaded packages were saved in cache "
                          "until the next successful transaction."))
            logger.info(_("You can remove cached packages by executing "
                          "'%s'."), "{prog} clean packages".format(prog=dnf.util.MAIN_PROG))

        # Do not trigger the lazy creation:
        if self._history is not None:
            self.history.close()
        self._store_persistent_data()
        self._closeRpmDB()
        self._trans_success = False
523
    def close(self):
        # :api
        """Close all potential handles and clean cache.

        Typically the handles are to data sources and sinks.
        Idempotent: subsequent calls are no-ops.
        """

        if self._closed:
            return
        logger.log(dnf.logging.DDEBUG, 'Cleaning up.')
        self._closed = True
        # finalize (persistors, history, rpmdb) before dropping state
        self._finalize_base()
        self.reset(sack=True, repos=True, goal=True)
        self._plugins = None
539
540    def read_all_repos(self, opts=None):
541        # :api
542        """Read repositories from the main conf file and from .repo files."""
543
544        reader = dnf.conf.read.RepoReader(self.conf, opts)
545        for repo in reader:
546            try:
547                self.repos.add(repo)
548            except dnf.exceptions.ConfigError as e:
549                logger.warning(e)
550
    def reset(self, sack=False, repos=False, goal=False):
        # :api
        """Make the Base object forget about various things.

        :param sack: drop the loaded sack
        :param repos: replace the repo dict with a fresh empty one
        :param goal: drop depsolving state (goal, pending module changes,
            history handle, comps transaction, resolved transaction)
        """
        if sack:
            self._sack = None
        if repos:
            self._repos = dnf.repodict.RepoDict()
        if goal:
            self._goal = None
            if self._sack is not None:
                # rebuild an empty goal on top of the surviving sack
                self._goal = dnf.goal.Goal(self._sack)
                self._goal.protect_running_kernel = self.conf.protect_running_kernel
            if self._sack and self._moduleContainer:
                # sack must be set to enable operations on moduleContainer
                self._moduleContainer.rollback()
            if self._history is not None:
                self.history.close()
            self._comps_trans = dnf.comps.TransactionBunch()
            self._transaction = None
        self._update_security_filters = []
571
    def _closeRpmDB(self):
        """Closes down the instances of rpmdb that could be open."""
        # Delegates to the _ts property deleter, which closes and clears
        # the cached transaction set (self._priv_ts).
        del self._ts
575
    # Mapping of conf.tsflags strings to rpm transaction flags (see _ts).
    _TS_FLAGS_TO_RPM = {'noscripts': rpm.RPMTRANS_FLAG_NOSCRIPTS,
                        'notriggers': rpm.RPMTRANS_FLAG_NOTRIGGERS,
                        'nodocs': rpm.RPMTRANS_FLAG_NODOCS,
                        'test': rpm.RPMTRANS_FLAG_TEST,
                        'justdb': rpm.RPMTRANS_FLAG_JUSTDB,
                        'nocontexts': rpm.RPMTRANS_FLAG_NOCONTEXTS,
                        'nocrypto': rpm.RPMTRANS_FLAG_NOFILEDIGEST}
    if hasattr(rpm, 'RPMTRANS_FLAG_NOCAPS'):
        # Introduced in rpm-4.14
        _TS_FLAGS_TO_RPM['nocaps'] = rpm.RPMTRANS_FLAG_NOCAPS

    # tsflags that additionally relax signature/digest verification.
    _TS_VSFLAGS_TO_RPM = {'nocrypto': rpm._RPMVSF_NOSIGNATURES |
                          rpm._RPMVSF_NODIGESTS}
589
    @property
    def goal(self):
        """The depsolving Goal, or None before fill_sack() has run."""
        return self._goal
593
    @property
    def _ts(self):
        """Set up the RPM transaction set that will be used
           for all the work.

        Lazily built from conf.tsflags, conf.diskspacecheck and
        conf.ignorearch; cached in self._priv_ts until deleted.
        """
        if self._priv_ts is not None:
            return self._priv_ts
        self._priv_ts = dnf.rpm.transaction.TransactionWrapper(
            self.conf.installroot)
        self._priv_ts.setFlags(0)  # reset everything.
        for flag in self.conf.tsflags:
            rpm_flag = self._TS_FLAGS_TO_RPM.get(flag)
            if rpm_flag is None:
                logger.critical(_('Invalid tsflag in config file: %s'), flag)
                continue
            self._priv_ts.addTsFlag(rpm_flag)
            # some tsflags (e.g. nocrypto) also relax verification flags
            vs_flag = self._TS_VSFLAGS_TO_RPM.get(flag)
            if vs_flag is not None:
                self._priv_ts.pushVSFlags(vs_flag)

        if not self.conf.diskspacecheck:
            self._rpm_probfilter.add(rpm.RPMPROB_FILTER_DISKSPACE)

        if self.conf.ignorearch:
            self._rpm_probfilter.add(rpm.RPMPROB_FILTER_IGNOREARCH)

        # OR all accumulated problem filters together
        probfilter = functools.reduce(operator.or_, self._rpm_probfilter, 0)
        self._priv_ts.setProbFilter(probfilter)
        return self._priv_ts
622
623    @_ts.deleter
624    def _ts(self):
625        """Releases the RPM transaction set. """
626        if self._priv_ts is None:
627            return
628        self._priv_ts.close()
629        del self._priv_ts
630        self._priv_ts = None
631
    def read_comps(self, arch_filter=False):
        # :api
        """Create the groups object to access the comps metadata.

        :param arch_filter: when True, restrict the comps data to the
            configured basearch
        :returns: the loaded dnf.comps.Comps object
        """
        timer = dnf.logging.Timer('loading comps')
        self._comps = dnf.comps.Comps()

        logger.log(dnf.logging.DDEBUG, 'Getting group metadata')
        for repo in self.repos.iter_enabled():
            if not repo.enablegroups:
                continue
            if not repo.metadata:
                continue
            comps_fn = repo._repo.getCompsFn()
            if not comps_fn:
                continue

            logger.log(dnf.logging.DDEBUG,
                       'Adding group file from repository: %s', repo.id)
            if repo._repo.getSyncStrategy() == dnf.repo.SYNC_ONLY_CACHE:
                # cache-only: use a previously decompressed file if present
                decompressed = misc.calculate_repo_gen_dest(comps_fn,
                                                            'groups.xml')
                if not os.path.exists(decompressed):
                    # root privileges are needed for comps decompression
                    continue
            else:
                decompressed = misc.repo_gen_decompress(comps_fn, 'groups.xml')

            try:
                self._comps._add_from_xml_filename(decompressed)
            except dnf.exceptions.CompsError as e:
                msg = _('Failed to add groups file for repository: %s - %s')
                logger.critical(msg, repo.id, e)

        if arch_filter:
            self._comps._i.arch_filter(
                [self._conf.substitutions['basearch']])
        timer()
        return self._comps
670
671    def _getHistory(self):
672        """auto create the history object that to access/append the transaction
673           history information. """
674        if self._history is None:
675            releasever = self.conf.releasever
676            self._history = SwdbInterface(self.conf.persistdir, releasever=releasever)
677        return self._history
678
    # Lazily created by _getHistory(); settable/deletable so callers and
    # tests can inject or reset the SWDB interface.
    history = property(fget=lambda self: self._getHistory(),
                       fset=lambda self, value: setattr(
                           self, "_history", value),
                       fdel=lambda self: setattr(self, "_history", None),
                       doc="DNF SWDB Interface Object")
684
685    def _goal2transaction(self, goal):
686        ts = self.history.rpm
687        all_obsoleted = set(goal.list_obsoleted())
688        installonly_query = self._get_installonly_query()
689        installonly_query.apply()
690        installonly_query_installed = installonly_query.installed().apply()
691
692        for pkg in goal.list_downgrades():
693            obs = goal.obsoleted_by_package(pkg)
694            downgraded = obs[0]
695            self._ds_callback.pkg_added(downgraded, 'dd')
696            self._ds_callback.pkg_added(pkg, 'd')
697            ts.add_downgrade(pkg, downgraded, obs[1:])
698        for pkg in goal.list_reinstalls():
699            self._ds_callback.pkg_added(pkg, 'r')
700            obs = goal.obsoleted_by_package(pkg)
701            nevra_pkg = str(pkg)
702            # reinstall could obsolete multiple packages with the same NEVRA or different NEVRA
703            # Set the package with the same NEVRA as reinstalled
704            obsoletes = []
705            for obs_pkg in obs:
706                if str(obs_pkg) == nevra_pkg:
707                    obsoletes.insert(0, obs_pkg)
708                else:
709                    obsoletes.append(obs_pkg)
710            reinstalled = obsoletes[0]
711            ts.add_reinstall(pkg, reinstalled, obsoletes[1:])
712        for pkg in goal.list_installs():
713            self._ds_callback.pkg_added(pkg, 'i')
714            obs = goal.obsoleted_by_package(pkg)
715            # Skip obsoleted packages that are not part of all_obsoleted,
716            # they are handled as upgrades/downgrades.
717            # Also keep RPMs with the same name - they're not always in all_obsoleted.
718            obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name]
719
720            reason = goal.get_reason(pkg)
721
722            #  Inherit reason if package is installonly an package with same name is installed
723            #  Use the same logic like upgrade
724            #  Upgrade of installonly packages result in install or install and remove step
725            if pkg in installonly_query and installonly_query_installed.filter(name=pkg.name):
726                reason = ts.get_reason(pkg)
727
728            # inherit the best reason from obsoleted packages
729            for obsolete in obs:
730                reason_obsolete = ts.get_reason(obsolete)
731                if libdnf.transaction.TransactionItemReasonCompare(reason, reason_obsolete) == -1:
732                    reason = reason_obsolete
733
734            ts.add_install(pkg, obs, reason)
735            cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od')
736            dnf.util.mapall(cb, obs)
737        for pkg in goal.list_upgrades():
738            obs = goal.obsoleted_by_package(pkg)
739            upgraded = None
740            for i in obs:
741                # try to find a package with matching name as the upgrade
742                if i.name == pkg.name:
743                    upgraded = i
744                    break
745            if upgraded is None:
746                # no matching name -> pick the first one
747                upgraded = obs.pop(0)
748            else:
749                obs.remove(upgraded)
750            # Skip obsoleted packages that are not part of all_obsoleted,
751            # they are handled as upgrades/downgrades.
752            # Also keep RPMs with the same name - they're not always in all_obsoleted.
753            obs = [i for i in obs if i in all_obsoleted or i.name == pkg.name]
754
755            cb = lambda pkg: self._ds_callback.pkg_added(pkg, 'od')
756            dnf.util.mapall(cb, obs)
757            if pkg in installonly_query:
758                ts.add_install(pkg, obs)
759            else:
760                ts.add_upgrade(pkg, upgraded, obs)
761                self._ds_callback.pkg_added(upgraded, 'ud')
762            self._ds_callback.pkg_added(pkg, 'u')
763        erasures = goal.list_erasures()
764        if erasures:
765            remaining_installed_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES).installed()
766            remaining_installed_query.filterm(pkg__neq=erasures)
767            for pkg in erasures:
768                if remaining_installed_query.filter(name=pkg.name):
769                    remaining = remaining_installed_query[0]
770                    ts.get_reason(remaining)
771                    self.history.set_reason(remaining, ts.get_reason(remaining))
772                self._ds_callback.pkg_added(pkg, 'e')
773                reason = goal.get_reason(pkg)
774                ts.add_erase(pkg, reason)
775        return ts
776
777    def _query_matches_installed(self, q):
778        """ See what packages in the query match packages (also in older
779            versions, but always same architecture) that are already installed.
780
781            Unlike in case of _sltr_matches_installed(), it is practical here
782            to know even the packages in the original query that can still be
783            installed.
784        """
785        inst = q.installed()
786        inst_per_arch = inst._na_dict()
787        avail_per_arch = q.available()._na_dict()
788        avail_l = []
789        inst_l = []
790        for na in avail_per_arch:
791            if na in inst_per_arch:
792                inst_l.append(inst_per_arch[na][0])
793            else:
794                avail_l.append(avail_per_arch[na])
795        return inst_l, avail_l
796
797    def _sltr_matches_installed(self, sltr):
798        """ See if sltr matches a patches that is (in older version or different
799            architecture perhaps) already installed.
800        """
801        inst = self.sack.query().installed().filterm(pkg=sltr.matches())
802        return list(inst)
803
804    def iter_userinstalled(self):
805        """Get iterator over the packages installed by the user."""
806        return (pkg for pkg in self.sack.query().installed()
807                if self.history.user_installed(pkg))
808
809    def _run_hawkey_goal(self, goal, allow_erasing):
810        ret = goal.run(
811            allow_uninstall=allow_erasing, force_best=self.conf.best,
812            ignore_weak_deps=(not self.conf.install_weak_deps))
813        if self.conf.debug_solver:
814            goal.write_debugdata('./debugdata/rpms')
815        return ret
816
    def resolve(self, allow_erasing=False):
        # :api
        """Build the transaction set.

        Runs the depsolver over the accumulated goal and, on success, stores
        the result in self._transaction.

        :param allow_erasing: if True, the solver may remove installed
            packages to satisfy the request
        :return: True if the resulting transaction is non-empty
        :raises dnf.exceptions.DepsolveError: if the goal cannot be resolved
        :raises dnf.exceptions.Error: if the transaction violates RPM
            limitations
        """
        exc = None
        self._finalize_comps_trans()

        timer = dnf.logging.Timer('depsolve')
        self._ds_callback.start()
        goal = self._goal
        if goal.req_has_erase():
            # erase requests: keep user-installed packages protected
            goal.push_userinstalled(self.sack.query().installed(),
                                    self.history)
        elif not self.conf.upgrade_group_objects_upgrade:
            # exclude packages installed from groups
            # these packages will be marked to installation
            # which could prevent them from upgrade, downgrade
            # to prevent "conflicting job" error it's not applied
            # to "remove" and "reinstall" commands

            solver = self._build_comps_solver()
            solver._exclude_packages_from_installed_groups(self)

        # configured protected packages must never be removed by the solver
        goal.add_protected(self.sack.query().filterm(
            name=self.conf.protected_packages))
        if not self._run_hawkey_goal(goal, allow_erasing):
            if self.conf.debuglevel >= 6:
                goal.log_decisions()
            msg = dnf.util._format_resolve_problems(goal.problem_rules())
            exc = dnf.exceptions.DepsolveError(msg)
        else:
            self._transaction = self._goal2transaction(goal)

        self._ds_callback.end()
        timer()

        got_transaction = self._transaction is not None and \
            len(self._transaction) > 0
        if got_transaction:
            # even a solvable transaction may violate rpm-level constraints
            msg = self._transaction._rpm_limitations()
            if msg:
                exc = dnf.exceptions.Error(msg)

        if exc is not None:
            raise exc

        self._plugins.run_resolved()

        # auto-enable module streams based on installed RPMs
        new_pkgs = self._goal.list_installs()
        new_pkgs += self._goal.list_upgrades()
        new_pkgs += self._goal.list_downgrades()
        new_pkgs += self._goal.list_reinstalls()
        self.sack.set_modules_enabled_by_pkgset(self._moduleContainer, new_pkgs)

        return got_transaction
872
    def do_transaction(self, display=()):
        # :api
        """Execute the depsolved transaction.

        Runs the rpm transaction check, the test transaction and then the
        real rpm transaction under the rpmdb lock, records the result in
        the history database and fires plugin hooks.

        :param display: a single TransactionDisplay instance or a sequence
            of them; a logging display is always prepended
        :return: history database transaction ID, or None
        :raises dnf.exceptions.TransactionCheckError: when the rpm check fails
        :raises dnf.exceptions.Error: when the test or real transaction fails
        """
        if not isinstance(display, Sequence):
            display = [display]
        display = \
            [dnf.yum.rpmtrans.LoggingTransactionDisplay()] + list(display)

        if not self.transaction:
            # packages are not changed, but comps and modules changes need to be committed
            self._moduleContainer.save()
            self._moduleContainer.updateFailSafeData()
            if self._history and (self._history.group or self._history.env):
                cmdline = None
                if hasattr(self, 'args') and self.args:
                    cmdline = ' '.join(self.args)
                elif hasattr(self, 'cmds') and self.cmds:
                    cmdline = ' '.join(self.cmds)
                old = self.history.last()
                if old is None:
                    rpmdb_version = self.sack._rpmdb_version()
                else:
                    rpmdb_version = old.end_rpmdb_version

                # record an empty (comps/modules only) history transaction
                self.history.beg(rpmdb_version, [], [], cmdline)
                self.history.end(rpmdb_version)
            self._plugins.run_pre_transaction()
            self._plugins.run_transaction()
            self._trans_success = True
            return

        tid = None
        logger.info(_('Running transaction check'))
        # serialize rpmdb access against other dnf/rpm processes
        lock = dnf.lock.build_rpmdb_lock(self.conf.persistdir,
                                         self.conf.exit_on_lock)
        with lock:
            self.transaction._populate_rpm_ts(self._ts)

            msgs = self._run_rpm_check()
            if msgs:
                msg = _('Error: transaction check vs depsolve:')
                logger.error(msg)
                for msg in msgs:
                    logger.error(msg)
                raise dnf.exceptions.TransactionCheckError(msg)

            logger.info(_('Transaction check succeeded.'))

            timer = dnf.logging.Timer('transaction test')
            logger.info(_('Running transaction test'))

            self._ts.order()  # order the transaction
            self._ts.clean()  # release memory not needed beyond this point

            # dry-run the transaction first so failures surface before
            # anything is changed on disk
            testcb = dnf.yum.rpmtrans.RPMTransaction(self, test=True)
            tserrors = self._ts.test(testcb)

            if len(tserrors) > 0:
                for msg in testcb.messages():
                    logger.critical(_('RPM: {}').format(msg))
                errstring = _('Transaction test error:') + '\n'
                for descr in tserrors:
                    errstring += '  %s\n' % ucd(descr)

                summary = self._trans_error_summary(errstring)
                if summary:
                    errstring += '\n' + summary

                raise dnf.exceptions.Error(errstring)
            del testcb

            logger.info(_('Transaction test succeeded.'))
            #  With RPMTRANS_FLAG_TEST return just before anything is stored permanently
            if self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
                return
            timer()

            # save module states on disk right before entering rpm transaction,
            # because we want system in recoverable state if transaction gets interrupted
            self._moduleContainer.save()
            self._moduleContainer.updateFailSafeData()

            # unset the sigquit handler
            timer = dnf.logging.Timer('transaction')
            # setup our rpm ts callback
            cb = dnf.yum.rpmtrans.RPMTransaction(self, displays=display)
            if self.conf.debuglevel < 2:
                # quiet mode: silence per-display output
                for display_ in cb.displays:
                    display_.output = False

            self._plugins.run_pre_transaction()

            logger.info(_('Running transaction'))
            tid = self._run_transaction(cb=cb)
        timer()
        self._plugins.unload_removed_plugins(self.transaction)
        self._plugins.run_transaction()

        # log post transaction summary
        def _pto_callback(action, tsis):
            msgs = []
            for tsi in tsis:
                msgs.append('{}: {}'.format(action, str(tsi)))
            return msgs
        for msg in dnf.util._post_transaction_output(self, self.transaction, _pto_callback):
            logger.debug(msg)

        return tid
980
981    def _trans_error_summary(self, errstring):
982        """Parse the error string for 'interesting' errors which can
983        be grouped, such as disk space issues.
984
985        :param errstring: the error string
986        :return: a string containing a summary of the errors
987        """
988        summary = ''
989        # do disk space report first
990        p = re.compile(r'needs (\d+)(K|M)B(?: more space)? on the (\S+) filesystem')
991        disk = {}
992        for m in p.finditer(errstring):
993            size_in_mb = int(m.group(1)) if m.group(2) == 'M' else math.ceil(
994                int(m.group(1)) / 1024.0)
995            if m.group(3) not in disk:
996                disk[m.group(3)] = size_in_mb
997            if disk[m.group(3)] < size_in_mb:
998                disk[m.group(3)] = size_in_mb
999
1000        if disk:
1001            summary += _('Disk Requirements:') + "\n"
1002            for k in disk:
1003                summary += "   " + P_(
1004                    'At least {0}MB more space needed on the {1} filesystem.',
1005                    'At least {0}MB more space needed on the {1} filesystem.',
1006                    disk[k]).format(disk[k], k) + '\n'
1007
1008        if not summary:
1009            return None
1010
1011        summary = _('Error Summary') + '\n-------------\n' + summary
1012
1013        return summary
1014
1015    def _record_history(self):
1016        return self.conf.history_record and \
1017            not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)
1018
1019    def _run_transaction(self, cb):
1020        """
1021        Perform the RPM transaction.
1022
1023        :return: history database transaction ID or None
1024        """
1025
1026        tid = None
1027        if self._record_history():
1028            using_pkgs_pats = list(self.conf.history_record_packages)
1029            installed_query = self.sack.query().installed()
1030            using_pkgs = installed_query.filter(name=using_pkgs_pats).run()
1031            rpmdbv = self.sack._rpmdb_version()
1032            lastdbv = self.history.last()
1033            if lastdbv is not None:
1034                lastdbv = lastdbv.end_rpmdb_version
1035
1036            if lastdbv is None or rpmdbv != lastdbv:
1037                logger.debug(_("RPMDB altered outside of {prog}.").format(
1038                    prog=dnf.util.MAIN_PROG_UPPER))
1039
1040            cmdline = None
1041            if hasattr(self, 'args') and self.args:
1042                cmdline = ' '.join(self.args)
1043            elif hasattr(self, 'cmds') and self.cmds:
1044                cmdline = ' '.join(self.cmds)
1045
1046            comment = self.conf.comment if self.conf.comment else ""
1047            tid = self.history.beg(rpmdbv, using_pkgs, [], cmdline, comment)
1048
1049        if self.conf.reset_nice:
1050            onice = os.nice(0)
1051            if onice:
1052                try:
1053                    os.nice(-onice)
1054                except:
1055                    onice = 0
1056
1057        logger.log(dnf.logging.DDEBUG, 'RPM transaction start.')
1058        errors = self._ts.run(cb.callback, '')
1059        logger.log(dnf.logging.DDEBUG, 'RPM transaction over.')
1060        # ts.run() exit codes are, hmm, "creative": None means all ok, empty
1061        # list means some errors happened in the transaction and non-empty
1062        # list that there were errors preventing the ts from starting...
1063        if self.conf.reset_nice:
1064            try:
1065                os.nice(onice)
1066            except:
1067                pass
1068        dnf.util._sync_rpm_trans_with_swdb(self._ts, self._transaction)
1069
1070        if errors is None:
1071            pass
1072        elif len(errors) == 0:
1073            # If there is no failing element it means that some "global" error
1074            # occurred (like rpm failed to obtain the transaction lock). Just pass
1075            # the rpm logs on to the user and raise an Error.
1076            # If there are failing elements the problem is related to those
1077            # elements and the Error is raised later, after saving the failure
1078            # to the history and printing out the transaction table to user.
1079            failed = [el for el in self._ts if el.Failed()]
1080            if not failed:
1081                for msg in cb.messages():
1082                    logger.critical(_('RPM: {}').format(msg))
1083                msg = _('Could not run transaction.')
1084                raise dnf.exceptions.Error(msg)
1085        else:
1086            logger.critical(_("Transaction couldn't start:"))
1087            for e in errors:
1088                logger.critical(ucd(e[0]))
1089            if self._record_history() and not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
1090                self.history.end(rpmdbv)
1091            msg = _("Could not run transaction.")
1092            raise dnf.exceptions.Error(msg)
1093
1094        for i in ('ts_all_fn', 'ts_done_fn'):
1095            if hasattr(cb, i):
1096                fn = getattr(cb, i)
1097                try:
1098                    misc.unlink_f(fn)
1099                except (IOError, OSError):
1100                    msg = _('Failed to remove transaction file %s')
1101                    logger.critical(msg, fn)
1102
1103        # keep install_set status because _verify_transaction will clean it
1104        self._trans_install_set = bool(self._transaction.install_set)
1105
1106        # sync up what just happened versus what is in the rpmdb
1107        if not self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
1108            self._verify_transaction(cb.verify_tsi_package)
1109
1110        return tid
1111
    def _verify_transaction(self, verify_pkg_cb=None):
        """Close out the finished transaction in the history database.

        Despite the name, no per-package verification is performed any more
        (see the note below); the verification banner is shown only to keep
        the familiar UX.

        :param verify_pkg_cb: optional callback(pkg, count, total) invoked
            once per transaction item
        """
        # reason-change items carry no package payload to display
        transaction_items = [
            tsi for tsi in self.transaction
            if tsi.action != libdnf.transaction.TransactionItemAction_REASON_CHANGE]
        total = len(transaction_items)

        def display_banner(pkg, count):
            count += 1
            if verify_pkg_cb is not None:
                verify_pkg_cb(pkg, count, total)
            return count

        timer = dnf.logging.Timer('verify transaction')
        count = 0

        # fresh sack reflecting the rpmdb state after the transaction ran
        rpmdb_sack = dnf.sack.rpmdb_sack(self)

        # mark group packages that are installed on the system as installed in the db
        q = rpmdb_sack.query().installed()
        names = set([i.name for i in q])
        for ti in self.history.group:
            g = ti.getCompsGroupItem()
            for p in g.getPackages():
                if p.getName() in names:
                    p.setInstalled(True)
                    p.save()

        # TODO: installed groups in environments

        # Post-transaction verification is no longer needed,
        # because DNF trusts error codes returned by RPM.
        # Verification banner is displayed to preserve UX.
        # TODO: drop in future DNF
        for tsi in transaction_items:
            count = display_banner(tsi.pkg, count)

        # record the post-transaction rpmdb version in the history db
        rpmdbv = rpmdb_sack._rpmdb_version()
        self.history.end(rpmdbv)

        timer()
        self._trans_success = True
1153
    def _download_remote_payloads(self, payloads, drpm, progress, callback_total):
        """Download the given payloads under the download lock, retrying
        recoverable failures and reporting delta-rpm savings.

        :param payloads: payload objects to fetch
        :param drpm: DeltaInfo used during the download
        :param progress: DownloadProgress instance to report to
        :param callback_total: optional callable(total_downloaded_size,
            download_start_time) invoked when downloading finishes
        :raises dnf.exceptions.DownloadError: on irrecoverable download errors
        """
        lock = dnf.lock.build_download_lock(self.conf.cachedir, self.conf.exit_on_lock)
        with lock:
            beg_download = time.time()
            est_remote_size = sum(pload.download_size for pload in payloads)
            total_drpm = len(
                [payload for payload in payloads if isinstance(payload, dnf.drpm.DeltaPayload)])
            # compatibility part for tools that do not accept total_drpms keyword
            if progress.start.__code__.co_argcount == 4:
                progress.start(len(payloads), est_remote_size, total_drpms=total_drpm)
            else:
                progress.start(len(payloads), est_remote_size)
            errors = dnf.repo._download_payloads(payloads, drpm)

            if errors._irrecoverable:
                raise dnf.exceptions.DownloadError(errors._irrecoverable)

            remote_size = sum(errors._bandwidth_used(pload)
                              for pload in payloads)
            # saving is a (real, full) byte tuple used for the delta report
            saving = dnf.repo._update_saving((0, 0), payloads,
                                             errors._recoverable)

            # retries == 0 in configuration means retry forever
            retries = self.conf.retries
            forever = retries == 0
            while errors._recoverable and (forever or retries > 0):
                if retries > 0:
                    retries -= 1

                msg = _("Some packages were not downloaded. Retrying.")
                logger.info(msg)

                # retry the failed packages as plain RPM payloads (no deltas)
                remaining_pkgs = [pkg for pkg in errors._recoverable]
                payloads = \
                    [dnf.repo._pkg2payload(pkg, progress, dnf.repo.RPMPayload)
                     for pkg in remaining_pkgs]
                est_remote_size = sum(pload.download_size
                                      for pload in payloads)
                progress.start(len(payloads), est_remote_size)
                errors = dnf.repo._download_payloads(payloads, drpm)

                if errors._irrecoverable:
                    raise dnf.exceptions.DownloadError(errors._irrecoverable)

                remote_size += \
                    sum(errors._bandwidth_used(pload) for pload in payloads)
                saving = dnf.repo._update_saving(saving, payloads, {})

            if errors._recoverable:
                msg = dnf.exceptions.DownloadError.errmap2str(
                    errors._recoverable)
                logger.info(msg)

        if callback_total is not None:
            callback_total(remote_size, beg_download)

        # report delta-rpm savings (or waste, when deltas failed) to the user
        (real, full) = saving
        if real != full:
            if real < full:
                msg = _("Delta RPMs reduced %.1f MB of updates to %.1f MB "
                        "(%.1f%% saved)")
                percent = 100 - real / full * 100
            elif real > full:
                msg = _("Failed Delta RPMs increased %.1f MB of updates to %.1f MB "
                        "(%.1f%% wasted)")
                percent = 100 - full / real * 100
            logger.info(msg, full / 1024 ** 2, real / 1024 ** 2, percent)
1220
1221    def download_packages(self, pkglist, progress=None, callback_total=None):
1222        # :api
1223        """Download the packages specified by the given list of packages.
1224
1225        `pkglist` is a list of packages to download, `progress` is an optional
1226         DownloadProgress instance, `callback_total` an optional callback to
1227         output messages about the download operation.
1228
1229        """
1230        remote_pkgs, local_pkgs = self._select_remote_pkgs(pkglist)
1231        if remote_pkgs:
1232            if progress is None:
1233                progress = dnf.callback.NullDownloadProgress()
1234            drpm = dnf.drpm.DeltaInfo(self.sack.query().installed(),
1235                                      progress, self.conf.deltarpm_percentage)
1236            self._add_tempfiles([pkg.localPkg() for pkg in remote_pkgs])
1237            payloads = [dnf.repo._pkg2payload(pkg, progress, drpm.delta_factory,
1238                                              dnf.repo.RPMPayload)
1239                        for pkg in remote_pkgs]
1240            self._download_remote_payloads(payloads, drpm, progress, callback_total)
1241
1242        if self.conf.destdir:
1243            for pkg in local_pkgs:
1244                if pkg.baseurl:
1245                    location = os.path.join(pkg.get_local_baseurl(),
1246                                            pkg.location.lstrip("/"))
1247                else:
1248                    location = os.path.join(pkg.repo.pkgdir, pkg.location.lstrip("/"))
1249                shutil.copy(location, self.conf.destdir)
1250
1251    def add_remote_rpms(self, path_list, strict=True, progress=None):
1252        # :api
1253        pkgs = []
1254        if not path_list:
1255            return pkgs
1256        if self._goal.req_length():
1257            raise dnf.exceptions.Error(
1258                _("Cannot add local packages, because transaction job already exists"))
1259        pkgs_error = []
1260        for path in path_list:
1261            if not os.path.exists(path) and '://' in path:
1262                # download remote rpm to a tempfile
1263                path = dnf.util._urlopen_progress(path, self.conf, progress)
1264                self._add_tempfiles([path])
1265            try:
1266                pkgs.append(self.sack.add_cmdline_package(path))
1267            except IOError as e:
1268                logger.warning(e)
1269                pkgs_error.append(path)
1270        self._setup_excludes_includes(only_main=True)
1271        if pkgs_error and strict:
1272            raise IOError(_("Could not open: {}").format(' '.join(pkgs_error)))
1273        return pkgs
1274
1275    def _sig_check_pkg(self, po):
1276        """Verify the GPG signature of the given package object.
1277
1278        :param po: the package object to verify the signature of
1279        :return: (result, error_string)
1280           where result is::
1281
1282              0 = GPG signature verifies ok or verification is not required.
1283              1 = GPG verification failed but installation of the right GPG key
1284                    might help.
1285              2 = Fatal GPG verification error, give up.
1286        """
1287        if po._from_cmdline:
1288            check = self.conf.localpkg_gpgcheck
1289            hasgpgkey = 0
1290        else:
1291            repo = self.repos[po.repoid]
1292            check = repo.gpgcheck
1293            hasgpgkey = not not repo.gpgkey
1294
1295        if check:
1296            root = self.conf.installroot
1297            ts = dnf.rpm.transaction.initReadOnlyTransaction(root)
1298            sigresult = dnf.rpm.miscutils.checkSig(ts, po.localPkg())
1299            localfn = os.path.basename(po.localPkg())
1300            del ts
1301            if sigresult == 0:
1302                result = 0
1303                msg = ''
1304
1305            elif sigresult == 1:
1306                if hasgpgkey:
1307                    result = 1
1308                else:
1309                    result = 2
1310                msg = _('Public key for %s is not installed') % localfn
1311
1312            elif sigresult == 2:
1313                result = 2
1314                msg = _('Problem opening package %s') % localfn
1315
1316            elif sigresult == 3:
1317                if hasgpgkey:
1318                    result = 1
1319                else:
1320                    result = 2
1321                result = 1
1322                msg = _('Public key for %s is not trusted') % localfn
1323
1324            elif sigresult == 4:
1325                result = 2
1326                msg = _('Package %s is not signed') % localfn
1327
1328        else:
1329            result = 0
1330            msg = ''
1331
1332        return result, msg
1333
1334    def package_signature_check(self, pkg):
1335        # :api
1336        """Verify the GPG signature of the given package object.
1337
1338        :param pkg: the package object to verify the signature of
1339        :return: (result, error_string)
1340           where result is::
1341
1342              0 = GPG signature verifies ok or verification is not required.
1343              1 = GPG verification failed but installation of the right GPG key
1344                    might help.
1345              2 = Fatal GPG verification error, give up.
1346        """
1347        return self._sig_check_pkg(pkg)
1348
1349    def _clean_packages(self, packages):
1350        for fn in packages:
1351            if not os.path.exists(fn):
1352                continue
1353            try:
1354                misc.unlink_f(fn)
1355            except OSError:
1356                logger.warning(_('Cannot remove %s'), fn)
1357                continue
1358            else:
1359                logger.log(dnf.logging.DDEBUG,
1360                           _('%s removed'), fn)
1361
1362    def _do_package_lists(self, pkgnarrow='all', patterns=None, showdups=None,
1363                       ignore_case=False, reponame=None):
1364        """Return a :class:`misc.GenericHolder` containing
1365        lists of package objects.  The contents of the lists are
1366        specified in various ways by the arguments.
1367
1368        :param pkgnarrow: a string specifying which types of packages
1369           lists to produces, such as updates, installed, available,
1370           etc.
1371        :param patterns: a list of names or wildcards specifying
1372           packages to list
1373        :param showdups: whether to include duplicate packages in the
1374           lists
1375        :param ignore_case: whether to ignore case when searching by
1376           package names
1377        :param reponame: limit packages list to the given repository
1378        :return: a :class:`misc.GenericHolder` instance with the
1379           following lists defined::
1380
1381             available = list of packageObjects
1382             installed = list of packageObjects
1383             upgrades = tuples of packageObjects (updating, installed)
1384             extras = list of packageObjects
1385             obsoletes = tuples of packageObjects (obsoleting, installed)
1386             recent = list of packageObjects
1387        """
1388        if showdups is None:
1389            showdups = self.conf.showdupesfromrepos
1390        if patterns is None:
1391            return self._list_pattern(
1392                pkgnarrow, patterns, showdups, ignore_case, reponame)
1393
1394        assert not dnf.util.is_string_type(patterns)
1395        list_fn = functools.partial(
1396            self._list_pattern, pkgnarrow, showdups=showdups,
1397            ignore_case=ignore_case, reponame=reponame)
1398        if patterns is None or len(patterns) == 0:
1399            return list_fn(None)
1400        yghs = map(list_fn, patterns)
1401        return functools.reduce(lambda a, b: a.merge_lists(b), yghs)
1402
1403    def _list_pattern(self, pkgnarrow, pattern, showdups, ignore_case,
1404                      reponame=None):
1405        def is_from_repo(package):
1406            """Test whether given package originates from the repository."""
1407            if reponame is None:
1408                return True
1409            return self.history.repo(package) == reponame
1410
1411        def pkgs_from_repo(packages):
1412            """Filter out the packages which do not originate from the repo."""
1413            return (package for package in packages if is_from_repo(package))
1414
1415        def query_for_repo(query):
1416            """Filter out the packages which do not originate from the repo."""
1417            if reponame is None:
1418                return query
1419            return query.filter(reponame=reponame)
1420
1421        ygh = misc.GenericHolder(iter=pkgnarrow)
1422
1423        installed = []
1424        available = []
1425        reinstall_available = []
1426        old_available = []
1427        updates = []
1428        obsoletes = []
1429        obsoletesTuples = []
1430        recent = []
1431        extras = []
1432        autoremove = []
1433
1434        # do the initial pre-selection
1435        ic = ignore_case
1436        q = self.sack.query()
1437        if pattern is not None:
1438            subj = dnf.subject.Subject(pattern, ignore_case=ic)
1439            q = subj.get_best_query(self.sack, with_provides=False)
1440
1441        # list all packages - those installed and available:
1442        if pkgnarrow == 'all':
1443            dinst = {}
1444            ndinst = {}  # Newest versions by name.arch
1445            for po in q.installed():
1446                dinst[po.pkgtup] = po
1447                if showdups:
1448                    continue
1449                key = (po.name, po.arch)
1450                if key not in ndinst or po > ndinst[key]:
1451                    ndinst[key] = po
1452            installed = list(pkgs_from_repo(dinst.values()))
1453
1454            avail = query_for_repo(q.available())
1455            if not showdups:
1456                avail = avail.filterm(latest_per_arch_by_priority=True)
1457            for pkg in avail:
1458                if showdups:
1459                    if pkg.pkgtup in dinst:
1460                        reinstall_available.append(pkg)
1461                    else:
1462                        available.append(pkg)
1463                else:
1464                    key = (pkg.name, pkg.arch)
1465                    if pkg.pkgtup in dinst:
1466                        reinstall_available.append(pkg)
1467                    elif key not in ndinst or pkg.evr_gt(ndinst[key]):
1468                        available.append(pkg)
1469                    else:
1470                        old_available.append(pkg)
1471
1472        # produce the updates list of tuples
1473        elif pkgnarrow == 'upgrades':
1474            updates = query_for_repo(q).filterm(upgrades_by_priority=True)
1475            # reduce a query to security upgrades if they are specified
1476            updates = self._merge_update_filters(updates, upgrade=True)
1477            # reduce a query to latest packages
1478            updates = updates.latest().run()
1479
1480        # installed only
1481        elif pkgnarrow == 'installed':
1482            installed = list(pkgs_from_repo(q.installed()))
1483
1484        # available in a repository
1485        elif pkgnarrow == 'available':
1486            if showdups:
1487                avail = query_for_repo(q).available()
1488                installed_dict = q.installed()._na_dict()
1489                for avail_pkg in avail:
1490                    key = (avail_pkg.name, avail_pkg.arch)
1491                    installed_pkgs = installed_dict.get(key, [])
1492                    same_ver = [pkg for pkg in installed_pkgs
1493                                if pkg.evr == avail_pkg.evr]
1494                    if len(same_ver) > 0:
1495                        reinstall_available.append(avail_pkg)
1496                    else:
1497                        available.append(avail_pkg)
1498            else:
1499                # we will only look at the latest versions of packages:
1500                available_dict = query_for_repo(
1501                    q).available().filterm(latest_per_arch_by_priority=True)._na_dict()
1502                installed_dict = q.installed().latest()._na_dict()
1503                for (name, arch) in available_dict:
1504                    avail_pkg = available_dict[(name, arch)][0]
1505                    inst_pkg = installed_dict.get((name, arch), [None])[0]
1506                    if not inst_pkg or avail_pkg.evr_gt(inst_pkg):
1507                        available.append(avail_pkg)
1508                    elif avail_pkg.evr_eq(inst_pkg):
1509                        reinstall_available.append(avail_pkg)
1510                    else:
1511                        old_available.append(avail_pkg)
1512
1513        # packages to be removed by autoremove
1514        elif pkgnarrow == 'autoremove':
1515            autoremove_q = query_for_repo(q)._unneeded(self.history.swdb)
1516            autoremove = autoremove_q.run()
1517
1518        # not in a repo but installed
1519        elif pkgnarrow == 'extras':
1520            extras = [pkg for pkg in q.extras() if is_from_repo(pkg)]
1521
1522        # obsoleting packages (and what they obsolete)
1523        elif pkgnarrow == 'obsoletes':
1524            inst = q.installed()
1525            obsoletes = query_for_repo(
1526                self.sack.query()).filter(obsoletes_by_priority=inst)
1527            # reduce a query to security upgrades if they are specified
1528            obsoletes = self._merge_update_filters(obsoletes, warning=False)
1529            obsoletesTuples = []
1530            for new in obsoletes:
1531                obsoleted_reldeps = new.obsoletes
1532                obsoletesTuples.extend(
1533                    [(new, old) for old in
1534                     inst.filter(provides=obsoleted_reldeps)])
1535
1536        # packages recently added to the repositories
1537        elif pkgnarrow == 'recent':
1538            avail = q.available()
1539            if not showdups:
1540                avail = avail.filterm(latest_per_arch_by_priority=True)
1541            recent = query_for_repo(avail)._recent(self.conf.recent)
1542
1543        ygh.installed = installed
1544        ygh.available = available
1545        ygh.reinstall_available = reinstall_available
1546        ygh.old_available = old_available
1547        ygh.updates = updates
1548        ygh.obsoletes = obsoletes
1549        ygh.obsoletesTuples = obsoletesTuples
1550        ygh.recent = recent
1551        ygh.extras = extras
1552        ygh.autoremove = autoremove
1553
1554        return ygh
1555
1556    def _add_comps_trans(self, trans):
1557        self._comps_trans += trans
1558        return len(trans)
1559
1560    def _remove_if_unneeded(self, query):
1561        """
1562        Mark to remove packages that are not required by any user installed package (reason group
1563        or user)
1564        :param query: dnf.query.Query() object
1565        """
1566        query = query.installed()
1567        if not query:
1568            return
1569
1570        unneeded_pkgs = query._safe_to_remove(self.history.swdb, debug_solver=False)
1571        unneeded_pkgs_history = query.filter(
1572            pkg=[i for i in query if self.history.group.is_removable_pkg(i.name)])
1573        pkg_with_dependent_pkgs = unneeded_pkgs_history.difference(unneeded_pkgs)
1574
1575        # mark packages with dependent packages as a dependency to allow removal with dependent
1576        # package
1577        for pkg in pkg_with_dependent_pkgs:
1578            self.history.set_reason(pkg, libdnf.transaction.TransactionItemReason_DEPENDENCY)
1579        unneeded_pkgs = unneeded_pkgs.intersection(unneeded_pkgs_history)
1580
1581        remove_packages = query.intersection(unneeded_pkgs)
1582        if remove_packages:
1583            for pkg in remove_packages:
1584                self._goal.erase(pkg, clean_deps=self.conf.clean_requirements_on_remove)
1585
    def _finalize_comps_trans(self):
        """Translate the accumulated comps transaction (``self._comps_trans``)
        into goal operations (install / upgrade / remove of the member
        packages of the selected groups/environments)."""
        trans = self._comps_trans
        basearch = self.conf.substitutions['basearch']

        def trans_upgrade(query, remove_query, comps_pkg):
            # Upgrade all matched packages; the remove set is left untouched.
            sltr = dnf.selector.Selector(self.sack)
            sltr.set(pkg=query)
            self._goal.upgrade(select=sltr)
            return remove_query

        def trans_install(query, remove_query, comps_pkg, strict):
            if self.conf.multilib_policy == "all":
                if not comps_pkg.requires:
                    self._install_multiarch(query, strict=strict)
                else:
                    # it installs only one arch for conditional packages
                    installed_query = query.installed().apply()
                    self._report_already_installed(installed_query)
                    sltr = dnf.selector.Selector(self.sack)
                    # "(name if requires)" is the rich-dep form of a conditional package.
                    sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires))
                    self._goal.install(select=sltr, optional=not strict)

            else:
                sltr = dnf.selector.Selector(self.sack)
                if comps_pkg.requires:
                    sltr.set(provides="({} if {})".format(comps_pkg.name, comps_pkg.requires))
                else:
                    if self.conf.obsoletes:
                        # Also consider packages that obsolete the matched ones.
                        query = query.union(self.sack.query().filterm(obsoletes=query))
                    sltr.set(pkg=query)
                self._goal.install(select=sltr, optional=not strict)
            return remove_query

        def trans_remove(query, remove_query, comps_pkg):
            # Defer removal: candidates are collected and later pruned by
            # _remove_if_unneeded().
            remove_query = remove_query.union(query)
            return remove_query

        remove_query = self.sack.query().filterm(empty=True)
        # Process strict installs, optional installs, upgrades, then removals.
        attr_fn = ((trans.install, functools.partial(trans_install, strict=True)),
                   (trans.install_opt, functools.partial(trans_install, strict=False)),
                   (trans.upgrade, trans_upgrade),
                   (trans.remove, trans_remove))

        for (attr, fn) in attr_fn:
            for comps_pkg in attr:
                query_args = {'name': comps_pkg.name}
                if (comps_pkg.basearchonly):
                    query_args.update({'arch': basearch})
                q = self.sack.query().filterm(**query_args).apply()
                # Source packages are never valid group members.
                q.filterm(arch__neq=["src", "nosrc"])
                if not q:
                    package_string = comps_pkg.name
                    if comps_pkg.basearchonly:
                        package_string += '.' + basearch
                    logger.warning(_('No match for group package "{}"').format(package_string))
                    continue
                remove_query = fn(q, remove_query, comps_pkg)
                self._goal.group_members.add(comps_pkg.name)

        self._remove_if_unneeded(remove_query)
1646
1647    def _build_comps_solver(self):
1648        def reason_fn(pkgname):
1649            q = self.sack.query().installed().filterm(name=pkgname)
1650            if not q:
1651                return None
1652            try:
1653                return self.history.rpm.get_reason(q[0])
1654            except AttributeError:
1655                return libdnf.transaction.TransactionItemReason_UNKNOWN
1656
1657        return dnf.comps.Solver(self.history, self._comps, reason_fn)
1658
1659    def environment_install(self, env_id, types, exclude=None, strict=True, exclude_groups=None):
1660        # :api
1661        """Installs packages of environment group identified by env_id.
1662        :param types: Types of packages to install. Either an integer as a
1663            logical conjunction of CompsPackageType ids or a list of string
1664            package type ids (conditional, default, mandatory, optional).
1665        """
1666        assert dnf.util.is_string_type(env_id)
1667        solver = self._build_comps_solver()
1668
1669        if not isinstance(types, int):
1670            types = libdnf.transaction.listToCompsPackageType(types)
1671
1672        trans = dnf.comps.install_or_skip(solver._environment_install,
1673                                          env_id, types, exclude or set(),
1674                                          strict, exclude_groups)
1675        if not trans:
1676            return 0
1677        return self._add_comps_trans(trans)
1678
1679    def environment_remove(self, env_id):
1680        # :api
1681        assert dnf.util.is_string_type(env_id)
1682        solver = self._build_comps_solver()
1683        trans = solver._environment_remove(env_id)
1684        return self._add_comps_trans(trans)
1685
1686    def group_install(self, grp_id, pkg_types, exclude=None, strict=True):
1687        # :api
1688        """Installs packages of selected group
1689        :param pkg_types: Types of packages to install. Either an integer as a
1690            logical conjunction of CompsPackageType ids or a list of string
1691            package type ids (conditional, default, mandatory, optional).
1692        :param exclude: list of package name glob patterns
1693            that will be excluded from install set
1694        :param strict: boolean indicating whether group packages that
1695            exist but are non-installable due to e.g. dependency
1696            issues should be skipped (False) or cause transaction to
1697            fail to resolve (True)
1698        """
1699        def _pattern_to_pkgname(pattern):
1700            if dnf.util.is_glob_pattern(pattern):
1701                q = self.sack.query().filterm(name__glob=pattern)
1702                return map(lambda p: p.name, q)
1703            else:
1704                return (pattern,)
1705
1706        assert dnf.util.is_string_type(grp_id)
1707        exclude_pkgnames = None
1708        if exclude:
1709            nested_excludes = [_pattern_to_pkgname(p) for p in exclude]
1710            exclude_pkgnames = itertools.chain.from_iterable(nested_excludes)
1711
1712        solver = self._build_comps_solver()
1713
1714        if not isinstance(pkg_types, int):
1715            pkg_types = libdnf.transaction.listToCompsPackageType(pkg_types)
1716
1717        trans = dnf.comps.install_or_skip(solver._group_install,
1718                                          grp_id, pkg_types, exclude_pkgnames,
1719                                          strict)
1720        if not trans:
1721            return 0
1722        if strict:
1723            instlog = trans.install
1724        else:
1725            instlog = trans.install_opt
1726        logger.debug(_("Adding packages from group '%s': %s"),
1727                     grp_id, instlog)
1728        return self._add_comps_trans(trans)
1729
1730    def env_group_install(self, patterns, types, strict=True, exclude=None, exclude_groups=None):
1731        q = CompsQuery(self.comps, self.history, CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
1732                       CompsQuery.AVAILABLE)
1733        cnt = 0
1734        done = True
1735        for pattern in patterns:
1736            try:
1737                res = q.get(pattern)
1738            except dnf.exceptions.CompsError as err:
1739                logger.error(ucd(err))
1740                done = False
1741                continue
1742            for group_id in res.groups:
1743                if not exclude_groups or group_id not in exclude_groups:
1744                    cnt += self.group_install(group_id, types, exclude=exclude, strict=strict)
1745            for env_id in res.environments:
1746                cnt += self.environment_install(env_id, types, exclude=exclude, strict=strict,
1747                                                exclude_groups=exclude_groups)
1748        if not done and strict:
1749            raise dnf.exceptions.Error(_('Nothing to do.'))
1750        return cnt
1751
1752    def group_remove(self, grp_id):
1753        # :api
1754        assert dnf.util.is_string_type(grp_id)
1755        solver = self._build_comps_solver()
1756        trans = solver._group_remove(grp_id)
1757        return self._add_comps_trans(trans)
1758
1759    def env_group_remove(self, patterns):
1760        q = CompsQuery(self.comps, self.history,
1761                       CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
1762                       CompsQuery.INSTALLED)
1763        try:
1764            res = q.get(*patterns)
1765        except dnf.exceptions.CompsError as err:
1766            logger.error("Warning: %s", ucd(err))
1767            raise dnf.exceptions.Error(_('No groups marked for removal.'))
1768        cnt = 0
1769        for env in res.environments:
1770            cnt += self.environment_remove(env)
1771        for grp in res.groups:
1772            cnt += self.group_remove(grp)
1773        return cnt
1774
1775    def env_group_upgrade(self, patterns):
1776        q = CompsQuery(self.comps, self.history,
1777                       CompsQuery.GROUPS | CompsQuery.ENVIRONMENTS,
1778                       CompsQuery.INSTALLED)
1779        group_upgraded = False
1780        for pattern in patterns:
1781            try:
1782                res = q.get(pattern)
1783            except dnf.exceptions.CompsError as err:
1784                logger.error(ucd(err))
1785                continue
1786            for env in res.environments:
1787                try:
1788                    self.environment_upgrade(env)
1789                    group_upgraded = True
1790                except dnf.exceptions.CompsError as err:
1791                    logger.error(ucd(err))
1792                    continue
1793            for grp in res.groups:
1794                try:
1795                    self.group_upgrade(grp)
1796                    group_upgraded = True
1797                except dnf.exceptions.CompsError as err:
1798                    logger.error(ucd(err))
1799                    continue
1800        if not group_upgraded:
1801            msg = _('No group marked for upgrade.')
1802            raise dnf.cli.CliError(msg)
1803
1804    def environment_upgrade(self, env_id):
1805        # :api
1806        assert dnf.util.is_string_type(env_id)
1807        solver = self._build_comps_solver()
1808        trans = solver._environment_upgrade(env_id)
1809        return self._add_comps_trans(trans)
1810
1811    def group_upgrade(self, grp_id):
1812        # :api
1813        assert dnf.util.is_string_type(grp_id)
1814        solver = self._build_comps_solver()
1815        trans = solver._group_upgrade(grp_id)
1816        return self._add_comps_trans(trans)
1817
1818    def _gpg_key_check(self):
1819        """Checks for the presence of GPG keys in the rpmdb.
1820
1821        :return: 0 if there are no GPG keys in the rpmdb, and 1 if
1822           there are keys
1823        """
1824        gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum'
1825        if os.path.exists(gpgkeyschecked):
1826            return 1
1827
1828        installroot = self.conf.installroot
1829        myts = dnf.rpm.transaction.initReadOnlyTransaction(root=installroot)
1830        myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
1831        idx = myts.dbMatch('name', 'gpg-pubkey')
1832        keys = len(idx)
1833        del idx
1834        del myts
1835
1836        if keys == 0:
1837            return 0
1838        else:
1839            mydir = os.path.dirname(gpgkeyschecked)
1840            if not os.path.exists(mydir):
1841                os.makedirs(mydir)
1842
1843            fo = open(gpgkeyschecked, 'w')
1844            fo.close()
1845            del fo
1846            return 1
1847
1848    def _install_multiarch(self, query, reponame=None, strict=True):
1849        already_inst, available = self._query_matches_installed(query)
1850        self._report_already_installed(already_inst)
1851        for packages in available:
1852            sltr = dnf.selector.Selector(self.sack)
1853            q = self.sack.query().filterm(pkg=packages)
1854            if self.conf.obsoletes:
1855                q = q.union(self.sack.query().filterm(obsoletes=q))
1856            sltr = sltr.set(pkg=q)
1857            if reponame is not None:
1858                sltr = sltr.set(reponame=reponame)
1859            self._goal.install(select=sltr, optional=(not strict))
1860        return len(available)
1861
1862    def _categorize_specs(self, install, exclude):
1863        """
1864        Categorize :param install and :param exclude list into two groups each (packages and groups)
1865
1866        :param install: list of specs, whether packages ('foo') or groups/modules ('@bar')
1867        :param exclude: list of specs, whether packages ('foo') or groups/modules ('@bar')
1868        :return: categorized install and exclude specs (stored in argparse.Namespace class)
1869
1870        To access packages use: specs.pkg_specs,
1871        to access groups use: specs.grp_specs
1872        """
1873        install_specs = argparse.Namespace()
1874        exclude_specs = argparse.Namespace()
1875        _parse_specs(install_specs, install)
1876        _parse_specs(exclude_specs, exclude)
1877
1878        return install_specs, exclude_specs
1879
1880    def _exclude_package_specs(self, exclude_specs):
1881        glob_excludes = [exclude for exclude in exclude_specs.pkg_specs
1882                         if dnf.util.is_glob_pattern(exclude)]
1883        excludes = [exclude for exclude in exclude_specs.pkg_specs
1884                    if exclude not in glob_excludes]
1885
1886        exclude_query = self.sack.query().filter(name=excludes)
1887        glob_exclude_query = self.sack.query().filter(name__glob=glob_excludes)
1888
1889        self.sack.add_excludes(exclude_query)
1890        self.sack.add_excludes(glob_exclude_query)
1891
1892    def _expand_groups(self, group_specs):
1893        groups = set()
1894        q = CompsQuery(self.comps, self.history,
1895                       CompsQuery.ENVIRONMENTS | CompsQuery.GROUPS,
1896                       CompsQuery.AVAILABLE | CompsQuery.INSTALLED)
1897
1898        for pattern in group_specs:
1899            try:
1900                res = q.get(pattern)
1901            except dnf.exceptions.CompsError as err:
1902                logger.error("Warning: Module or %s", ucd(err))
1903                continue
1904
1905            groups.update(res.groups)
1906            groups.update(res.environments)
1907
1908            for environment_id in res.environments:
1909                environment = self.comps._environment_by_id(environment_id)
1910                for group in environment.groups_iter():
1911                    groups.add(group.id)
1912
1913        return list(groups)
1914
1915    def _install_groups(self, group_specs, excludes, skipped, strict=True):
1916        for group_spec in group_specs:
1917            try:
1918                types = self.conf.group_package_types
1919
1920                if '/' in group_spec:
1921                    split = group_spec.split('/')
1922                    group_spec = split[0]
1923                    types = split[1].split(',')
1924
1925                self.env_group_install([group_spec], types, strict, excludes.pkg_specs,
1926                                       excludes.grp_specs)
1927            except dnf.exceptions.Error:
1928                skipped.append("@" + group_spec)
1929
    def install_specs(self, install, exclude=None, reponame=None, strict=True, forms=None):
        # :api
        """Mark a mixed list of package/group/module specs for installation.

        :param install: list of specs to install ('pkg' or '@group'/'@module')
        :param exclude: list of specs excluded from the install set
        :param reponame: restrict package marking to this repository
        :param strict: when True, unresolvable specs make the call raise
        :param forms: allowed nevra forms for package spec matching
        :raises dnf.exceptions.MarkingErrors: aggregate of every spec that
            could not be marked (packages, groups, module depsolv errors)
        """
        if exclude is None:
            exclude = []
        # Collectors for everything that fails; reported together at the end.
        no_match_group_specs = []
        error_group_specs = []
        no_match_pkg_specs = []
        error_pkg_specs = []
        install_specs, exclude_specs = self._categorize_specs(install, exclude)

        self._exclude_package_specs(exclude_specs)
        for spec in install_specs.pkg_specs:
            try:
                self.install(spec, reponame=reponame, strict=strict, forms=forms)
            except dnf.exceptions.MarkingError as e:
                logger.error(str(e))
                no_match_pkg_specs.append(spec)
        no_match_module_specs = []
        module_depsolv_errors = ()
        if WITH_MODULES and install_specs.grp_specs:
            # Try '@' specs as modules first; whatever the module subsystem
            # cannot match falls through to comps group handling below.
            try:
                module_base = dnf.module.module_base.ModuleBase(self)
                module_base.install(install_specs.grp_specs, strict)
            except dnf.exceptions.MarkingErrors as e:
                if e.no_match_group_specs:
                    for e_spec in e.no_match_group_specs:
                        no_match_module_specs.append(e_spec)
                if e.error_group_specs:
                    for e_spec in e.error_group_specs:
                        error_group_specs.append("@" + e_spec)
                module_depsolv_errors = e.module_depsolv_errors

        else:
            # No module support: every '@' spec is treated as a comps group.
            no_match_module_specs = install_specs.grp_specs

        if no_match_module_specs:
            exclude_specs.grp_specs = self._expand_groups(exclude_specs.grp_specs)
            self._install_groups(no_match_module_specs, exclude_specs, no_match_group_specs, strict)

        if no_match_group_specs or error_group_specs or no_match_pkg_specs or error_pkg_specs \
                or module_depsolv_errors:
            raise dnf.exceptions.MarkingErrors(no_match_group_specs=no_match_group_specs,
                                               error_group_specs=error_group_specs,
                                               no_match_pkg_specs=no_match_pkg_specs,
                                               error_pkg_specs=error_pkg_specs,
                                               module_depsolv_errors=module_depsolv_errors)
1976
    def install(self, pkg_spec, reponame=None, strict=True, forms=None):
        # :api
        """Mark package(s) given by pkg_spec and reponame for installation.

        :param pkg_spec: package specification (name, nevra, glob, ...)
        :param reponame: restrict candidates to this repository (or None)
        :param strict: when False, a non-installable candidate is skipped
            instead of failing the transaction
        :param forms: allowed nevra forms for matching
        :return: 1 (or the multiarch group count) when something was marked,
            0 otherwise
        """

        subj = dnf.subject.Subject(pkg_spec)
        solution = subj.get_best_solution(self.sack, forms=forms, with_src=False)

        # With multilib_policy "all" — or an arch given explicitly in the
        # spec — mark every matching arch; otherwise let selectors choose.
        if self.conf.multilib_policy == "all" or subj._is_arch_specified(solution):
            q = solution['query']
            if reponame is not None:
                q.filterm(reponame=reponame)
            if not q:
                self._raise_package_not_found_error(pkg_spec, forms, reponame)
            return self._install_multiarch(q, reponame=reponame, strict=strict)

        elif self.conf.multilib_policy == "best":
            sltrs = subj._get_best_selectors(self,
                                             forms=forms,
                                             obsoletes=self.conf.obsoletes,
                                             reponame=reponame,
                                             reports=True,
                                             solution=solution)
            if not sltrs:
                self._raise_package_not_found_error(pkg_spec, forms, reponame)

            for sltr in sltrs:
                self._goal.install(select=sltr, optional=(not strict))
            return 1
        # Unrecognized multilib policy: nothing marked.
        return 0
2006
2007    def package_downgrade(self, pkg, strict=False):
2008        # :api
2009        if pkg._from_system:
2010            msg = 'downgrade_package() for an installed package.'
2011            raise NotImplementedError(msg)
2012
2013        q = self.sack.query().installed().filterm(name=pkg.name, arch=[pkg.arch, "noarch"])
2014        if not q:
2015            msg = _("Package %s not installed, cannot downgrade it.")
2016            logger.warning(msg, pkg.name)
2017            raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name)
2018        elif sorted(q)[0] > pkg:
2019            sltr = dnf.selector.Selector(self.sack)
2020            sltr.set(pkg=[pkg])
2021            self._goal.install(select=sltr, optional=(not strict))
2022            return 1
2023        else:
2024            msg = _("Package %s of lower version already installed, "
2025                    "cannot downgrade it.")
2026            logger.warning(msg, pkg.name)
2027            return 0
2028
2029    def package_install(self, pkg, strict=True):
2030        # :api
2031        q = self.sack.query()._nevra(pkg.name, pkg.evr, pkg.arch)
2032        already_inst, available = self._query_matches_installed(q)
2033        if pkg in already_inst:
2034            self._report_already_installed([pkg])
2035        elif pkg not in itertools.chain.from_iterable(available):
2036            raise dnf.exceptions.PackageNotFoundError(_('No match for argument: %s') % pkg.location)
2037        else:
2038            sltr = dnf.selector.Selector(self.sack)
2039            sltr.set(pkg=[pkg])
2040            self._goal.install(select=sltr, optional=(not strict))
2041        return 1
2042
2043    def package_reinstall(self, pkg):
2044        if self.sack.query().installed().filterm(name=pkg.name, evr=pkg.evr, arch=pkg.arch):
2045            self._goal.install(pkg)
2046            return 1
2047        msg = _("Package %s not installed, cannot reinstall it.")
2048        logger.warning(msg, str(pkg))
2049        raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg.location, pkg.name)
2050
2051    def package_remove(self, pkg):
2052        self._goal.erase(pkg)
2053        return 1
2054
2055    def package_upgrade(self, pkg):
2056        # :api
2057        if pkg._from_system:
2058            msg = 'upgrade_package() for an installed package.'
2059            raise NotImplementedError(msg)
2060
2061        if pkg.arch == 'src':
2062            msg = _("File %s is a source package and cannot be updated, ignoring.")
2063            logger.info(msg, pkg.location)
2064            return 0
2065        installed = self.sack.query().installed().apply()
2066        if self.conf.obsoletes and self.sack.query().filterm(pkg=[pkg]).filterm(obsoletes=installed):
2067            sltr = dnf.selector.Selector(self.sack)
2068            sltr.set(pkg=[pkg])
2069            self._goal.upgrade(select=sltr)
2070            return 1
2071        q = installed.filter(name=pkg.name, arch=[pkg.arch, "noarch"])
2072        if not q:
2073            msg = _("Package %s not installed, cannot update it.")
2074            logger.warning(msg, pkg.name)
2075            raise dnf.exceptions.MarkingError(
2076                _('No match for argument: %s') % pkg.location, pkg.name)
2077        elif sorted(q)[-1] < pkg:
2078            sltr = dnf.selector.Selector(self.sack)
2079            sltr.set(pkg=[pkg])
2080            self._goal.upgrade(select=sltr)
2081            return 1
2082        else:
2083            msg = _("The same or higher version of %s is already installed, "
2084                    "cannot update it.")
2085            logger.warning(msg, pkg.name)
2086            return 0
2087
    def _upgrade_internal(self, query, obsoletes, reponame, pkg_spec=None):
        """Shared implementation behind upgrade() and upgrade_all().

        :param query: candidate packages for the upgrade
        :param obsoletes: when truthy, also pull obsoleting packages in
        :param reponame: restrict candidates to this repository (or None)
        :param pkg_spec: original user spec, forwarded to the update filters
        :return: always 1
        """
        installed_all = self.sack.query().installed()
        # Add only relevant obsoletes to transaction => installed, upgrades
        q = query.intersection(self.sack.query().filterm(name=[pkg.name for pkg in installed_all]))
        installed_query = q.installed()
        if obsoletes:
            obsoletes = self.sack.query().available().filterm(
                obsoletes=installed_query.union(q.upgrades()))
            # add obsoletes into transaction
            query = query.union(obsoletes)
        if reponame is not None:
            query.filterm(reponame=reponame)
        # Possibly reduce the query to security/advisory-filtered upgrades.
        query = self._merge_update_filters(query, pkg_spec=pkg_spec, upgrade=True)
        if query:
            # Include latest installed packages so the solver may keep them.
            query = query.union(installed_query.latest())
            sltr = dnf.selector.Selector(self.sack)
            sltr.set(pkg=query)
            self._goal.upgrade(select=sltr)
        return 1
2107
2108
    def upgrade(self, pkg_spec, reponame=None):
        # :api
        """Mark packages matching *pkg_spec* for upgrade.

        :param pkg_spec: package specification (may be a glob pattern)
        :param reponame: restrict candidates to this repository (or None)
        :return: 1 when candidates were handed to the solver
        :raises dnf.exceptions.MarkingError: when nothing matches the spec
        :raises dnf.exceptions.PackagesNotInstalledError: when the spec only
            matches packages that are not installed
        """
        subj = dnf.subject.Subject(pkg_spec)
        solution = subj.get_best_solution(self.sack)
        q = solution["query"]
        if q:
            wildcard = dnf.util.is_glob_pattern(pkg_spec)
            # wildcard shouldn't print not installed packages
            # only solution with nevra.name provide packages with same name
            if not wildcard and solution['nevra'] and solution['nevra'].name:
                pkg_name = solution['nevra'].name
                installed = self.sack.query().installed().apply()
                # A matching obsoleter counts as "installed" for the warnings.
                obsoleters = q.filter(obsoletes=installed) \
                    if self.conf.obsoletes else self.sack.query().filterm(empty=True)
                if not obsoleters:
                    installed_name = installed.filter(name=pkg_name).apply()
                    if not installed_name:
                        msg = _('Package %s available, but not installed.')
                        logger.warning(msg, pkg_name)
                        raise dnf.exceptions.PackagesNotInstalledError(
                            _('No match for argument: %s') % pkg_spec, pkg_spec)
                    elif solution['nevra'].arch and not dnf.util.is_glob_pattern(solution['nevra'].arch):
                        if not installed_name.filterm(arch=solution['nevra'].arch):
                            msg = _('Package %s available, but installed for different architecture.')
                            logger.warning(msg, "{}.{}".format(pkg_name, solution['nevra'].arch))
            # Only a plain-name spec considers obsoletes; versioned specs do not.
            obsoletes = self.conf.obsoletes and solution['nevra'] \
                        and solution['nevra'].has_just_name()
            return self._upgrade_internal(q, obsoletes, reponame, pkg_spec)
        raise dnf.exceptions.MarkingError(_('No match for argument: %s') % pkg_spec, pkg_spec)
2138
2139    def upgrade_all(self, reponame=None):
2140        # :api
2141        # provide only available packages to solver to trigger targeted upgrade
2142        # possibilities will be ignored
2143        # usage of selected packages will unify dnf behavior with other upgrade functions
2144        return self._upgrade_internal(
2145            self.sack.query(), self.conf.obsoletes, reponame, pkg_spec=None)
2146
2147    def distro_sync(self, pkg_spec=None):
2148        if pkg_spec is None:
2149            self._goal.distupgrade_all()
2150        else:
2151            subject = dnf.subject.Subject(pkg_spec)
2152            solution = subject.get_best_solution(self.sack, with_src=False)
2153            solution["query"].filterm(reponame__neq=hawkey.SYSTEM_REPO_NAME)
2154            sltrs = subject._get_best_selectors(self, solution=solution,
2155                                                obsoletes=self.conf.obsoletes, reports=True)
2156            if not sltrs:
2157                logger.info(_('No package %s installed.'), pkg_spec)
2158                return 0
2159            for sltr in sltrs:
2160                self._goal.distupgrade(select=sltr)
2161        return 1
2162
2163    def autoremove(self, forms=None, pkg_specs=None, grp_specs=None, filenames=None):
2164        # :api
2165        """Removes all 'leaf' packages from the system that were originally
2166        installed as dependencies of user-installed packages but which are
2167        no longer required by any such package."""
2168
2169        if any([grp_specs, pkg_specs, filenames]):
2170            pkg_specs += filenames
2171            done = False
2172            # Remove groups.
2173            if grp_specs and forms:
2174                for grp_spec in grp_specs:
2175                    msg = _('Not a valid form: %s')
2176                    logger.warning(msg, grp_spec)
2177            elif grp_specs:
2178                if self.env_group_remove(grp_specs):
2179                    done = True
2180
2181            for pkg_spec in pkg_specs:
2182                try:
2183                    self.remove(pkg_spec, forms=forms)
2184                except dnf.exceptions.MarkingError as e:
2185                    logger.info(str(e))
2186                else:
2187                    done = True
2188
2189            if not done:
2190                logger.warning(_('No packages marked for removal.'))
2191
2192        else:
2193            pkgs = self.sack.query()._unneeded(self.history.swdb,
2194                                               debug_solver=self.conf.debug_solver)
2195            for pkg in pkgs:
2196                self.package_remove(pkg)
2197
2198    def remove(self, pkg_spec, reponame=None, forms=None):
2199        # :api
2200        """Mark the specified package for removal."""
2201
2202        matches = dnf.subject.Subject(pkg_spec).get_best_query(self.sack, forms=forms)
2203        installed = [
2204            pkg for pkg in matches.installed()
2205            if reponame is None or
2206            self.history.repo(pkg) == reponame]
2207        if not installed:
2208            self._raise_package_not_installed_error(pkg_spec, forms, reponame)
2209
2210        clean_deps = self.conf.clean_requirements_on_remove
2211        for pkg in installed:
2212            self._goal.erase(pkg, clean_deps=clean_deps)
2213        return len(installed)
2214
2215    def reinstall(self, pkg_spec, old_reponame=None, new_reponame=None,
2216                  new_reponame_neq=None, remove_na=False):
2217        subj = dnf.subject.Subject(pkg_spec)
2218        q = subj.get_best_query(self.sack)
2219        installed_pkgs = [
2220            pkg for pkg in q.installed()
2221            if old_reponame is None or
2222            self.history.repo(pkg) == old_reponame]
2223
2224        available_q = q.available()
2225        if new_reponame is not None:
2226            available_q.filterm(reponame=new_reponame)
2227        if new_reponame_neq is not None:
2228            available_q.filterm(reponame__neq=new_reponame_neq)
2229        available_nevra2pkg = dnf.query._per_nevra_dict(available_q)
2230
2231        if not installed_pkgs:
2232            raise dnf.exceptions.PackagesNotInstalledError(
2233                'no package matched', pkg_spec, available_nevra2pkg.values())
2234
2235        cnt = 0
2236        clean_deps = self.conf.clean_requirements_on_remove
2237        for installed_pkg in installed_pkgs:
2238            try:
2239                available_pkg = available_nevra2pkg[ucd(installed_pkg)]
2240            except KeyError:
2241                if not remove_na:
2242                    continue
2243                self._goal.erase(installed_pkg, clean_deps=clean_deps)
2244            else:
2245                self._goal.install(available_pkg)
2246            cnt += 1
2247
2248        if cnt == 0:
2249            raise dnf.exceptions.PackagesNotAvailableError(
2250                'no package matched', pkg_spec, installed_pkgs)
2251
2252        return cnt
2253
2254    def downgrade(self, pkg_spec):
2255        # :api
2256        """Mark a package to be downgraded.
2257
2258        This is equivalent to first removing the currently installed package,
2259        and then installing an older version.
2260
2261        """
2262        return self.downgrade_to(pkg_spec)
2263
2264    def downgrade_to(self, pkg_spec, strict=False):
2265        """Downgrade to specific version if specified otherwise downgrades
2266        to one version lower than the package installed.
2267        """
2268        subj = dnf.subject.Subject(pkg_spec)
2269        q = subj.get_best_query(self.sack)
2270        if not q:
2271            msg = _('No match for argument: %s') % pkg_spec
2272            raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec)
2273        done = 0
2274        available_pkgs = q.available()
2275        available_pkg_names = list(available_pkgs._name_dict().keys())
2276        q_installed = self.sack.query().installed().filterm(name=available_pkg_names)
2277        if len(q_installed) == 0:
2278            msg = _('Packages for argument %s available, but not installed.') % pkg_spec
2279            raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec, available_pkgs)
2280        for pkg_name in q_installed._name_dict().keys():
2281            downgrade_pkgs = available_pkgs.downgrades().filter(name=pkg_name)
2282            if not downgrade_pkgs:
2283                msg = _("Package %s of lowest version already installed, cannot downgrade it.")
2284                logger.warning(msg, pkg_name)
2285                continue
2286            sltr = dnf.selector.Selector(self.sack)
2287            sltr.set(pkg=downgrade_pkgs)
2288            self._goal.install(select=sltr, optional=(not strict))
2289            done = 1
2290        return done
2291
2292    def provides(self, provides_spec):
2293        providers = self.sack.query().filterm(file__glob=provides_spec)
2294        if providers:
2295            return providers, [provides_spec]
2296        providers = dnf.query._by_provides(self.sack, provides_spec)
2297        if providers:
2298            return providers, [provides_spec]
2299        if provides_spec.startswith('/bin/') or provides_spec.startswith('/sbin/'):
2300            # compatibility for packages that didn't do UsrMove
2301            binary_provides = ['/usr' + provides_spec]
2302        elif provides_spec.startswith('/'):
2303            # provides_spec is a file path
2304            return providers, [provides_spec]
2305        else:
2306            # suppose that provides_spec is a command, search in /usr/sbin/
2307            binary_provides = [prefix + provides_spec
2308                               for prefix in ['/bin/', '/sbin/', '/usr/bin/', '/usr/sbin/']]
2309        return self.sack.query().filterm(file__glob=binary_provides), binary_provides
2310
2311    def add_security_filters(self, cmp_type, types=(), advisory=(), bugzilla=(), cves=(), severity=()):
2312        #  :api
2313        """
2314        It modifies results of install, upgrade, and distrosync methods according to provided
2315        filters.
2316
2317        :param cmp_type: only 'eq' or 'gte' allowed
2318        :param types: List or tuple with strings. E.g. 'bugfix', 'enhancement', 'newpackage',
2319        'security'
2320        :param advisory: List or tuple with strings. E.g.Eg. FEDORA-2201-123
2321        :param bugzilla: List or tuple with strings. Include packages that fix a Bugzilla ID,
2322        Eg. 123123.
2323        :param cves: List or tuple with strings. Include packages that fix a CVE
2324        (Common Vulnerabilities and Exposures) ID. Eg. CVE-2201-0123
2325        :param severity: List or tuple with strings. Includes packages that provide a fix
2326        for an issue of the specified severity.
2327        """
2328        cmp_dict = {'eq': '__eqg', 'gte': '__eqg__gt'}
2329        if cmp_type not in cmp_dict:
2330            raise ValueError("Unsupported value for `cmp_type`")
2331        cmp = cmp_dict[cmp_type]
2332        if types:
2333            key = 'advisory_type' + cmp
2334            self._update_security_options.setdefault(key, set()).update(types)
2335        if advisory:
2336            key = 'advisory' + cmp
2337            self._update_security_options.setdefault(key, set()).update(advisory)
2338        if bugzilla:
2339            key = 'advisory_bug' + cmp
2340            self._update_security_options.setdefault(key, set()).update(bugzilla)
2341        if cves:
2342            key = 'advisory_cve' + cmp
2343            self._update_security_options.setdefault(key, set()).update(cves)
2344        if severity:
2345            key = 'advisory_severity' + cmp
2346            self._update_security_options.setdefault(key, set()).update(severity)
2347
2348    def reset_security_filters(self):
2349        #  :api
2350        """
2351        Reset all security filters
2352        """
2353        self._update_security_options = {}
2354
    def _merge_update_filters(self, q, pkg_spec=None, warning=True, upgrade=False):
        """
        Merge Queries in _update_filters and return intersection with q Query

        Applies the active security filters (whole queries stored in
        _update_security_filters plus the keyword options collected by
        add_security_filters()) to q.  When nothing survives the
        intersection and `warning` is True, a message reporting how many
        ordinary updates would be available is logged instead.

        @param q: Query
        @param pkg_spec: spec the query originated from; only used in warnings
        @param warning: whether to log the "no security updates" notice
        @param upgrade: when True, option filter names get a '__upgrade' suffix
        @return: Query
        """
        # Fast path: no security filtering requested, or nothing to filter.
        if not (self._update_security_options or self._update_security_filters) or not q:
            return q
        merged_queries = self.sack.query().filterm(empty=True)
        if self._update_security_filters:
            for query in self._update_security_filters:
                merged_queries = merged_queries.union(query)

            # cache the union so repeated calls do not re-merge the list
            self._update_security_filters = [merged_queries]
        if self._update_security_options:
            for filter_name, values in self._update_security_options.items():
                if upgrade:
                    filter_name = filter_name + '__upgrade'
                # dynamic keyword, e.g. advisory_cve__eqg={'CVE-...'}
                kwargs = {filter_name: values}
                merged_queries = merged_queries.union(q.filter(**kwargs))

        merged_queries = q.intersection(merged_queries)
        if not merged_queries:
            if warning:
                # count distinct package names with pending regular updates
                q = q.upgrades()
                count = len(q._name_dict().keys())
                if count > 0:
                    if pkg_spec is None:
                        msg1 = _("No security updates needed, but {} update "
                                 "available").format(count)
                        msg2 = _("No security updates needed, but {} updates "
                                 "available").format(count)
                        logger.warning(P_(msg1, msg2, count))
                    else:
                        msg1 = _('No security updates needed for "{}", but {} '
                                 'update available').format(pkg_spec, count)
                        msg2 = _('No security updates needed for "{}", but {} '
                                 'updates available').format(pkg_spec, count)
                        logger.warning(P_(msg1, msg2, count))
        return merged_queries
2395
    def _get_key_for_package(self, po, askcb=None, fullaskcb=None):
        """Retrieve a key for a package. If needed, use the given
        callback to prompt whether the key should be imported.

        :param po: the package object to retrieve the key of
        :param askcb: Callback function to use to ask permission to
           import a key.  The arguments *askcb* should take are the
           package object, the userid of the key, and the keyid
        :param fullaskcb: Callback function to use to ask permission to
           import a key.  This differs from *askcb* in that it gets
           passed a dictionary so that we can expand the values passed.
        :raises: :class:`dnf.exceptions.Error` if there are errors
           retrieving the keys
        """
        if po._from_cmdline:
            # raise an exception, because po.repoid is not in self.repos
            msg = _('Unable to retrieve a key for a commandline package: %s')
            raise ValueError(msg % po)

        repo = self.repos[po.repoid]
        # Skip key download when this repo's keys were already imported in
        # this session.
        key_installed = repo.id in self._repo_set_imported_gpg_keys
        keyurls = [] if key_installed else repo.gpgkey

        def _prov_key_data(msg):
            # Append failing-package and key-configuration details to an
            # error message so the user can diagnose the key setup.
            msg += _('. Failing package is: %s') % (po) + '\n '
            msg += _('GPG Keys are configured as: %s') % \
                    (', '.join(repo.gpgkey))
            return msg

        user_cb_fail = False
        self._repo_set_imported_gpg_keys.add(repo.id)
        for keyurl in keyurls:
            keys = dnf.crypto.retrieve(keyurl, repo)

            for info in keys:
                # Check if key is already installed
                if misc.keyInstalled(self._ts, info.rpm_id, info.timestamp) >= 0:
                    msg = _('GPG key at %s (0x%s) is already installed')
                    logger.info(msg, keyurl, info.short_id)
                    continue

                # DNS Extension: create a key object, pass it to the verification class
                # and print its result as an advice to the user.
                if self.conf.gpgkey_dns_verification:
                    dns_input_key = dnf.dnssec.KeyInfo.from_rpm_key_object(info.userid,
                                                                           info.raw_key)
                    dns_result = dnf.dnssec.DNSSECKeyVerification.verify(dns_input_key)
                    logger.info(dnf.dnssec.nice_user_msg(dns_input_key, dns_result))

                # Try installing/updating GPG key
                info.url = keyurl
                if self.conf.gpgkey_dns_verification:
                    dnf.crypto.log_dns_key_import(info, dns_result)
                else:
                    dnf.crypto.log_key_import(info)
                # rc records whether the user (or policy) approved the import.
                rc = False
                if self.conf.assumeno:
                    rc = False
                elif self.conf.assumeyes:
                    # DNS Extension: We assume, that the key is trusted in case it is valid,
                    # its existence is explicitly denied or in case the domain is not signed
                    # and therefore there is no way to know for sure (this is mainly for
                    # backward compatibility)
                    # FAQ:
                    # * What is PROVEN_NONEXISTENCE?
                    #    In DNSSEC, your domain does not need to be signed, but this state
                    #    (not signed) has to be proven by the upper domain. e.g. when example.com.
                    #    is not signed, com. servers have to sign the message, that example.com.
                    #    does not have any signing key (KSK to be more precise).
                    if self.conf.gpgkey_dns_verification:
                        if dns_result in (dnf.dnssec.Validity.VALID,
                                          dnf.dnssec.Validity.PROVEN_NONEXISTENCE):
                            rc = True
                            logger.info(dnf.dnssec.any_msg(_("The key has been approved.")))
                        else:
                            rc = False
                            logger.info(dnf.dnssec.any_msg(_("The key has been rejected.")))
                    else:
                        rc = True

                # grab the .sig/.asc for the keyurl, if it exists if it
                # does check the signature on the key if it is signed by
                # one of our ca-keys for this repo or the global one then
                # rc = True else ask as normal.

                elif fullaskcb:
                    rc = fullaskcb({"po": po, "userid": info.userid,
                                    "hexkeyid": info.short_id,
                                    "keyurl": keyurl,
                                    "fingerprint": info.fingerprint,
                                    "timestamp": info.timestamp})
                elif askcb:
                    rc = askcb(po, info.userid, info.short_id)

                if not rc:
                    # remember the refusal so we can report it after the loop
                    user_cb_fail = True
                    continue

                # Import the key
                # If rpm.RPMTRANS_FLAG_TEST in self._ts, gpg keys cannot be imported successfully
                # therefore the flag was removed for import operation
                test_flag = self._ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)
                if test_flag:
                    orig_flags = self._ts.getTsFlags()
                    self._ts.setFlags(orig_flags - rpm.RPMTRANS_FLAG_TEST)
                result = self._ts.pgpImportPubkey(misc.procgpgkey(info.raw_key))
                if test_flag:
                    # restore the original transaction flags after the import
                    self._ts.setFlags(orig_flags)
                if result != 0:
                    msg = _('Key import failed (code %d)') % result
                    raise dnf.exceptions.Error(_prov_key_data(msg))
                logger.info(_('Key imported successfully'))
                key_installed = True

        if not key_installed and user_cb_fail:
            raise dnf.exceptions.Error(_("Didn't install any keys"))

        if not key_installed:
            # keys were already present but didn't match this package
            msg = _('The GPG keys listed for the "%s" repository are '
                    'already installed but they are not correct for this '
                    'package.\n'
                    'Check that the correct key URLs are configured for '
                    'this repository.') % repo.name
            raise dnf.exceptions.Error(_prov_key_data(msg))

        # Check if the newly installed keys helped
        result, errmsg = self._sig_check_pkg(po)
        if result != 0:
            if keyurls:
                msg = _("Import of key(s) didn't help, wrong key(s)?")
                logger.info(msg)
            errmsg = ucd(errmsg)
            raise dnf.exceptions.Error(_prov_key_data(errmsg))
2529
2530    def package_import_key(self, pkg, askcb=None, fullaskcb=None):
2531        # :api
2532        """Retrieve a key for a package. If needed, use the given
2533        callback to prompt whether the key should be imported.
2534
2535        :param pkg: the package object to retrieve the key of
2536        :param askcb: Callback function to use to ask permission to
2537           import a key.  The arguments *askcb* should take are the
2538           package object, the userid of the key, and the keyid
2539        :param fullaskcb: Callback function to use to ask permission to
2540           import a key.  This differs from *askcb* in that it gets
2541           passed a dictionary so that we can expand the values passed.
2542        :raises: :class:`dnf.exceptions.Error` if there are errors
2543           retrieving the keys
2544        """
2545        self._get_key_for_package(pkg, askcb, fullaskcb)
2546
2547    def _run_rpm_check(self):
2548        results = []
2549        self._ts.check()
2550        for prob in self._ts.problems():
2551            #  Newer rpm (4.8.0+) has problem objects, older have just strings.
2552            #  Should probably move to using the new objects, when we can. For
2553            # now just be compatible.
2554            results.append(ucd(prob))
2555
2556        return results
2557
2558    def urlopen(self, url, repo=None, mode='w+b', **kwargs):
2559        # :api
2560        """
2561        Open the specified absolute url, return a file object
2562        which respects proxy setting even for non-repo downloads
2563        """
2564        return dnf.util._urlopen(url, self.conf, repo, mode, **kwargs)
2565
2566    def _get_installonly_query(self, q=None):
2567        if q is None:
2568            q = self._sack.query(flags=hawkey.IGNORE_EXCLUDES)
2569        installonly = q.filter(provides=self.conf.installonlypkgs)
2570        return installonly
2571
2572    def _report_icase_hint(self, pkg_spec):
2573        subj = dnf.subject.Subject(pkg_spec, ignore_case=True)
2574        solution = subj.get_best_solution(self.sack, with_nevra=True,
2575                                          with_provides=False, with_filenames=False)
2576        if solution['query'] and solution['nevra'] and solution['nevra'].name and \
2577                pkg_spec != solution['query'][0].name:
2578            logger.info(_("  * Maybe you meant: {}").format(solution['query'][0].name))
2579
2580    def _select_remote_pkgs(self, install_pkgs):
2581        """ Check checksum of packages from local repositories and returns list packages from remote
2582        repositories that will be downloaded. Packages from commandline are skipped.
2583
2584        :param install_pkgs: list of packages
2585        :return: list of remote pkgs
2586        """
2587        def _verification_of_packages(pkg_list, logger_msg):
2588            all_packages_verified = True
2589            for pkg in pkg_list:
2590                pkg_successfully_verified = False
2591                try:
2592                    pkg_successfully_verified = pkg.verifyLocalPkg()
2593                except Exception as e:
2594                    logger.critical(str(e))
2595                if pkg_successfully_verified is not True:
2596                    logger.critical(logger_msg.format(pkg, pkg.reponame))
2597                    all_packages_verified = False
2598
2599            return all_packages_verified
2600
2601        remote_pkgs = []
2602        local_repository_pkgs = []
2603        for pkg in install_pkgs:
2604            if pkg._is_local_pkg():
2605                if pkg.reponame != hawkey.CMDLINE_REPO_NAME:
2606                    local_repository_pkgs.append(pkg)
2607            else:
2608                remote_pkgs.append(pkg)
2609
2610        msg = _('Package "{}" from local repository "{}" has incorrect checksum')
2611        if not _verification_of_packages(local_repository_pkgs, msg):
2612            raise dnf.exceptions.Error(
2613                _("Some packages from local repository have incorrect checksum"))
2614
2615        if self.conf.cacheonly:
2616            msg = _('Package "{}" from repository "{}" has incorrect checksum')
2617            if not _verification_of_packages(remote_pkgs, msg):
2618                raise dnf.exceptions.Error(
2619                    _('Some packages have invalid cache, but cannot be downloaded due to '
2620                      '"--cacheonly" option'))
2621            remote_pkgs = []
2622
2623        return remote_pkgs, local_repository_pkgs
2624
2625    def _report_already_installed(self, packages):
2626        for pkg in packages:
2627            _msg_installed(pkg)
2628
2629    def _raise_package_not_found_error(self, pkg_spec, forms, reponame):
2630        all_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES)
2631        subject = dnf.subject.Subject(pkg_spec)
2632        solution = subject.get_best_solution(
2633            self.sack, forms=forms, with_src=False, query=all_query)
2634        if reponame is not None:
2635            solution['query'].filterm(reponame=reponame)
2636        if not solution['query']:
2637            raise dnf.exceptions.PackageNotFoundError(_('No match for argument'), pkg_spec)
2638        else:
2639            with_regular_query = self.sack.query(flags=hawkey.IGNORE_REGULAR_EXCLUDES)
2640            with_regular_query = solution['query'].intersection(with_regular_query)
2641            # Modular filtering is applied on a package set that already has regular excludes
2642            # filtered out. So if a package wasn't filtered out by regular excludes, it must have
2643            # been filtered out by modularity.
2644            if with_regular_query:
2645                msg = _('All matches were filtered out by exclude filtering for argument')
2646            else:
2647                msg = _('All matches were filtered out by modular filtering for argument')
2648            raise dnf.exceptions.PackageNotFoundError(msg, pkg_spec)
2649
2650    def _raise_package_not_installed_error(self, pkg_spec, forms, reponame):
2651        all_query = self.sack.query(flags=hawkey.IGNORE_EXCLUDES).installed()
2652        subject = dnf.subject.Subject(pkg_spec)
2653        solution = subject.get_best_solution(
2654            self.sack, forms=forms, with_src=False, query=all_query)
2655
2656        if not solution['query']:
2657            raise dnf.exceptions.PackagesNotInstalledError(_('No match for argument'), pkg_spec)
2658        if reponame is not None:
2659            installed = [pkg for pkg in solution['query'] if self.history.repo(pkg) == reponame]
2660        else:
2661            installed = solution['query']
2662        if not installed:
2663            msg = _('All matches were installed from a different repository for argument')
2664        else:
2665            msg = _('All matches were filtered out by exclude filtering for argument')
2666        raise dnf.exceptions.PackagesNotInstalledError(msg, pkg_spec)
2667
2668    def setup_loggers(self):
2669        # :api
2670        """
2671        Setup DNF file loggers based on given configuration file. The loggers are set the same
2672        way as if DNF was run from CLI.
2673        """
2674        self._logging._setup_from_dnf_conf(self.conf, file_loggers_only=True)
2675
2676    def _skipped_packages(self, report_problems, transaction):
2677        """returns set of conflicting packages and set of packages with broken dependency that would
2678        be additionally installed when --best and --allowerasing"""
2679        if self._goal.actions & (hawkey.INSTALL | hawkey.UPGRADE | hawkey.UPGRADE_ALL):
2680            best = True
2681        else:
2682            best = False
2683        ng = deepcopy(self._goal)
2684        params = {"allow_uninstall": self._allow_erasing,
2685                  "force_best": best,
2686                  "ignore_weak": True}
2687        ret = ng.run(**params)
2688        if not ret and report_problems:
2689            msg = dnf.util._format_resolve_problems(ng.problem_rules())
2690            logger.warning(msg)
2691        problem_conflicts = set(ng.problem_conflicts(available=True))
2692        problem_dependency = set(ng.problem_broken_dependency(available=True)) - problem_conflicts
2693
2694        def _nevra(item):
2695            return hawkey.NEVRA(name=item.name, epoch=item.epoch, version=item.version,
2696                                release=item.release, arch=item.arch)
2697
2698        # Sometimes, pkg is not in transaction item, therefore, comparing by nevra
2699        transaction_nevras = [_nevra(tsi) for tsi in transaction]
2700        skipped_conflicts = set(
2701            [pkg for pkg in problem_conflicts if _nevra(pkg) not in transaction_nevras])
2702        skipped_dependency = set(
2703            [pkg for pkg in problem_dependency if _nevra(pkg) not in transaction_nevras])
2704
2705        return skipped_conflicts, skipped_dependency
2706
2707
def _msg_installed(pkg):
    """Log an informational notice that *pkg* is already installed."""
    logger.info(_('Package %s is already installed.'), ucd(pkg))