@@ -5,8 +5,8 @@
 import os
 import shutil
 import tempfile
+import subprocess
 
-import dnf
 import kobo.rpmlib
 import koji
 import packaging.version
@@ -230,18 +230,20 @@
     :param bool force_for_old_dnf: add the conflicts even if libdnf can't handle them
     :raise RuntimeError: when a Koji query fails
     """
-    if (not force_for_old_dnf
-            and packaging.version.parse(dnf.VERSION) < packaging.version.parse('4.2.19')):
-        # For local builds, we can't use this code unless libdnf uses libmodulemd2
-        # (done in libdnf-0.45) - we can't check the libdnf version, so use
-        # dnf-4.2.19 (which requires libdnf-0.45) as a proxy.
-        log.warning(
-            "The necessary conflicts could not be generated due to RHBZ#1693683. "
-            "Some RPMs from the base modules (%s) may end up being used over modular RPMs. "
-            "This may result in different behavior than a production build.",
-            ", ".join(conf.base_module_names)
-        )
-        return
+    # Commenting out for dnf workaround
+    # See: https://pagure.io/releng/issue/10850
+    #if (not force_for_old_dnf
+    #        and packaging.version.parse(dnf.VERSION) < packaging.version.parse('4.2.19')):
+    #    # For local builds, we can't use this code unless libdnf uses libmodulemd2
+    #    # (done in libdnf-0.45) - we can't check the libdnf version, so use
+    #    # dnf-4.2.19 (which requires libdnf-0.45) as a proxy.
+    #    log.warning(
+    #        "The necessary conflicts could not be generated due to RHBZ#1693683. "
+    #        "Some RPMs from the base modules (%s) may end up being used over modular RPMs. "
+    #        "This may result in different behavior than a production build.",
+    #        ", ".join(conf.base_module_names)
+    #    )
+    #    return
 
     log.info("Finding any buildrequired modules that collide with the RPMs in the base modules")
     bm_tags = set()
@@ -345,91 +347,10 @@
 
     return nevras
 
-
 def _get_rpms_in_external_repo(repo_url, arches, cache_dir_name):
-    """
-    Get the available RPMs in the external repo for the provided arches.
-
-    :param str repo_url: the URL of the external repo with the "$arch" variable included
-    :param list arches: the list of arches to query the external repo for
-    :param str cache_dir_name: the cache directory name under f"{conf.cache_dir}/dnf"
-    :return: a set of the RPM NEVRAs
-    :rtype: set
-    :raise RuntimeError: if the cache is not writeable or the external repo couldn't be loaded
-    :raises ValueError: if there is no "$arch" variable in repo URL
-    """
-    if "$arch" not in repo_url:
-        raise ValueError(
-            "The external repo {} does not contain the $arch variable".format(repo_url)
-        )
-
-    base = dnf.Base()
-    try:
-        dnf_conf = base.conf
-        # Expire the metadata right away so that when a repo is loaded, it will always check to
-        # see if the external repo has been updated
-        dnf_conf.metadata_expire = 0
+    # Calling an external script using subprocess to avoid importing dnf
+    # See: https://pagure.io/releng/issue/10850
+    nevras = subprocess.check_output(['mbs-get-rpms-in-external-repo', repo_url, cache_dir_name,
+                                      conf.cache_dir, str(conf.dnf_timeout), str(conf.dnf_minrate)] + arches)
 
-        cache_location = os.path.join(conf.cache_dir, "dnf", cache_dir_name)
-        try:
-            # exist_ok=True can't be used in Python 2
-            os.makedirs(cache_location, mode=0o0770)
-        except OSError as e:
-            # Don't fail if the directories already exist
-            if e.errno != errno.EEXIST:
-                log.exception("Failed to create the cache directory %s", cache_location)
-                raise RuntimeError("The MBS cache is not writeable.")
-
-        # Tell DNF to use the cache directory
-        dnf_conf.cachedir = cache_location
-        # Don't skip repos that can't be synchronized
-        dnf_conf.skip_if_unavailable = False
-        dnf_conf.timeout = conf.dnf_timeout
-        # Get rid of everything to be sure it's a blank slate. This doesn't delete the cached repo
-        # data.
-        base.reset(repos=True, goal=True, sack=True)
-
-        # Add a separate repo for each architecture
-        for arch in arches:
-            # Convert arch to canon_arch. This handles cases where Koji "i686" arch is mapped to
-            # "i386" when generating RPM repository.
-            canon_arch = koji.canonArch(arch)
-            repo_name = "repo_{}".format(canon_arch)
-            repo_arch_url = repo_url.replace("$arch", canon_arch)
-            base.repos.add_new_repo(
-                repo_name, dnf_conf, baseurl=[repo_arch_url], minrate=conf.dnf_minrate,
-            )
-
-        try:
-            # Load the repos in parallel
-            base.update_cache()
-        except dnf.exceptions.RepoError:
-            msg = "Failed to load the external repos"
-            log.exception(msg)
-            raise RuntimeError(msg)
-
-        # dnf will not always raise an error on repo failures, so we check explicitly
-        for repo_name in base.repos:
-            if not base.repos[repo_name].metadata:
-                msg = "Failed to load metadata for repo %s" % repo_name
-                log.exception(msg)
-                raise RuntimeError(msg)
-
-        base.fill_sack(load_system_repo=False)
-
-        # Return all the available RPMs
-        nevras = set()
-        for rpm in base.sack.query().available():
-            rpm_dict = {
-                "arch": rpm.arch,
-                "epoch": rpm.epoch,
-                "name": rpm.name,
-                "release": rpm.release,
-                "version": rpm.version,
-            }
-            nevra = kobo.rpmlib.make_nvra(rpm_dict, force_epoch=True)
-            nevras.add(nevra)
-    finally:
-        base.close()
-
-    return nevras
+    return set(nevras.split())
Importing dnf in the MBS process causes a symbol clash on RHEL 7. This
is a temporary fix to avoid that, and should not be merged.
See https://pagure.io/releng/issue/10850
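The patch assumes a companion helper script named mbs-get-rpms-in-external-repo is
available on $PATH; that script is not part of this diff. Below is a minimal,
hypothetical sketch of what such a helper could look like, reconstructed from the
dnf code removed above: it performs the same repo queries in a separate process
(so the dnf import never happens inside MBS) and prints one NEVRA per line for
the caller's set(nevras.split()).

#!/usr/bin/python3
# Hypothetical sketch of the mbs-get-rpms-in-external-repo helper, reconstructed
# from the dnf code removed above. Running it as a separate process keeps the
# dnf import (and its symbol clash on RHEL 7) out of the MBS process.
import errno
import os
import sys

import dnf
import kobo.rpmlib
import koji


def main():
    # Argument order mirrors the subprocess.check_output() call in the patch:
    # repo_url, cache_dir_name, cache_dir, dnf_timeout, dnf_minrate, arches...
    repo_url, cache_dir_name, cache_dir, timeout, minrate = sys.argv[1:6]
    arches = sys.argv[6:]

    if "$arch" not in repo_url:
        sys.exit("The external repo {} does not contain the $arch variable".format(repo_url))

    base = dnf.Base()
    try:
        dnf_conf = base.conf
        # Expire the metadata right away so the external repo is always rechecked
        dnf_conf.metadata_expire = 0

        cache_location = os.path.join(cache_dir, "dnf", cache_dir_name)
        try:
            os.makedirs(cache_location, mode=0o0770)
        except OSError as e:
            # Don't fail if the directories already exist
            if e.errno != errno.EEXIST:
                raise
        dnf_conf.cachedir = cache_location
        dnf_conf.skip_if_unavailable = False
        dnf_conf.timeout = int(timeout)
        base.reset(repos=True, goal=True, sack=True)

        # Add a separate repo for each architecture, as the removed code did
        for arch in arches:
            canon_arch = koji.canonArch(arch)
            base.repos.add_new_repo(
                "repo_{}".format(canon_arch), dnf_conf,
                baseurl=[repo_url.replace("$arch", canon_arch)],
                minrate=int(minrate),
            )

        base.update_cache()
        # dnf will not always raise an error on repo failures, so check explicitly
        for repo_name in base.repos:
            if not base.repos[repo_name].metadata:
                sys.exit("Failed to load metadata for repo {}".format(repo_name))
        base.fill_sack(load_system_repo=False)

        # Print one NEVRA per line; the caller builds a set from this output
        for rpm in base.sack.query().available():
            rpm_dict = {
                "arch": rpm.arch,
                "epoch": rpm.epoch,
                "name": rpm.name,
                "release": rpm.release,
                "version": rpm.version,
            }
            print(kobo.rpmlib.make_nvra(rpm_dict, force_epoch=True))
    finally:
        base.close()


if __name__ == "__main__":
    main()

One consequence of the subprocess approach: a failure in the helper reaches MBS
as subprocess.CalledProcessError rather than the RuntimeError the removed code
raised, and on Python 3 check_output() returns bytes, so the resulting NEVRA set
contains bytes unless the output is decoded first.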