#1236 Port to libmodulemd v2
Merged 5 years ago by lsedlar. Opened 5 years ago by lsedlar.
lsedlar/pungi libmodulemd-v2 into master

file modified
+2 -2
@@ -6,9 +6,9 @@ 

  

  try:

      import gi

-     gi.require_version('Modulemd', '1.0') # noqa

+     gi.require_version('Modulemd', '2.0') # noqa

      from gi.repository import Modulemd

- except:

+ except (ImportError, ValueError):

      Modulemd = None

  

  
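For context, this guard degrades gracefully when the bindings are missing: `gi.require_version()` raises `ValueError` if the requested typelib version is not installed, and the imports raise `ImportError` when gi or Modulemd is absent. A minimal sketch of how a call site might use the guard (the `require_modulemd` helper is hypothetical, not part of Pungi):

```python
try:
    import gi
    gi.require_version("Modulemd", "2.0")  # noqa
    from gi.repository import Modulemd
except (ImportError, ValueError):
    Modulemd = None


def require_modulemd():
    # Hypothetical helper: fail fast with a clear message when the
    # libmodulemd 2.x GObject introspection bindings are unavailable.
    if Modulemd is None:
        raise RuntimeError("libmodulemd 2.0 bindings are required for module support")
    return Modulemd
```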

file modified
+11 -11
@@ -36,7 +36,7 @@ 

  from ..util import (

      find_old_compose,

      get_arch_variant_data,

-     iter_module_defaults,

+     collect_module_defaults,

      temp_dir,

  )

  from pungi import Modulemd
@@ -196,22 +196,21 @@ 

  

      # call modifyrepo to inject modulemd if needed

      if pkg_type == "rpm" and arch in variant.arch_mmds and Modulemd is not None:

-         modules = []

+         mod_index = Modulemd.ModuleIndex()

          metadata = []

  

          for module_id, mmd in variant.arch_mmds[arch].items():

              if modules_metadata:

-                 module_rpms = mmd.peek_rpm_artifacts().dup()

+                 module_rpms = mmd.get_rpm_artifacts()

                  metadata.append((module_id, module_rpms))

-             modules.append(mmd)

+             mod_index.add_module_stream(mmd)

  

-         module_names = set([x.get_name() for x in modules])

-         for mmddef in iter_module_defaults(compose.paths.work.module_defaults_dir()):

-             if mmddef.peek_module_name() in module_names:

-                 modules.append(mmddef)

+         module_names = set(mod_index.get_module_names())

+         defaults_dir = compose.paths.work.module_defaults_dir()

+         collect_module_defaults(defaults_dir, module_names, mod_index)

  

          log_file = compose.paths.log.log_file(arch, "modifyrepo-modules-%s" % variant)

-         add_modular_metadata(repo, repo_dir, modules, log_file)

+         add_modular_metadata(repo, repo_dir, mod_index, log_file)

  

          for module_id, module_rpms in metadata:

              modulemd_path = os.path.join(
@@ -230,11 +229,12 @@ 

      compose.log_info("[DONE ] %s" % msg)

  

  

- def add_modular_metadata(repo, repo_path, mmd, log_file):

+ def add_modular_metadata(repo, repo_path, mod_index, log_file):

      """Add modular metadata into a repository."""

      with temp_dir() as tmp_dir:

          modules_path = os.path.join(tmp_dir, "modules.yaml")

-         Modulemd.dump(mmd, modules_path)

+         with open(modules_path, "w") as f:

+             f.write(mod_index.dump_to_string())

  

          cmd = repo.get_modifyrepo_cmd(

              os.path.join(repo_path, "repodata"),
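The v1 `Modulemd.dump(list_of_objects, path)` call is replaced by serializing a `ModuleIndex` to a string. A minimal sketch of that round trip, using only calls that appear elsewhere in this PR (module name, stream, and values are illustrative):

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

# Build a stream plus matching defaults and collect both in an index.
stream = Modulemd.ModuleStreamV2.new("mymod", "rolling")
stream.set_version(1)
stream.set_context("deadbeef")
stream.set_summary("demo")
stream.set_description("demo")
stream.add_module_license("MIT")

defaults = Modulemd.DefaultsV1.new("mymod")
defaults.set_default_stream("rolling")

mod_index = Modulemd.ModuleIndex.new()
mod_index.add_module_stream(stream)
mod_index.add_defaults(defaults)

# Serialize the whole index to YAML and write it out, as
# add_modular_metadata() now does.
with open("modules.yaml", "w") as f:
    f.write(mod_index.dump_to_string())
```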

@@ -29,10 +29,10 @@ 

  

  from pungi import Modulemd

  from pungi.compose import get_ordered_variant_uids

- from pungi.arch import get_compatible_arches, split_name_arch, tree_arch_to_yum_arch

+ from pungi.arch import get_compatible_arches, split_name_arch

  from pungi.phases.base import PhaseBase

  from pungi.util import (get_arch_data, get_arch_variant_data, get_variant_data,

-                         makedirs, iter_module_defaults)

+                         makedirs, collect_module_defaults)

  from pungi.phases.createrepo import add_modular_metadata

  

  
@@ -377,23 +377,18 @@ 

  

      # Add modular metadata into the repo

      if variant.arch_mmds:

-         mmds = []

+         mod_index = Modulemd.ModuleIndex()

          for mmd in variant.arch_mmds[arch].values():

-             # Set the arch field, but no other changes are needed.

-             repo_mmd = mmd.copy()

-             repo_mmd.set_arch(tree_arch_to_yum_arch(arch))

-             mmds.append(repo_mmd)

+             mod_index.add_module_stream(mmd)

  

-         module_names = set([x.get_name() for x in mmds])

+         module_names = set(mod_index.get_module_names())

You probably don't need to wrap this in set(). You're guaranteed to get unique values from get_module_names(). The documentation isn't explicit here, which I will have to fix.

Same here, I think the set() is unnecessary.

it's not only about duplicates, but set gives us O(1) lookup instead of O(n).

          defaults_dir = compose.paths.work.module_defaults_dir()

-         for mmddef in iter_module_defaults(defaults_dir):

-             if mmddef.peek_module_name() in module_names:

-                 mmds.append(mmddef)

+         collect_module_defaults(defaults_dir, module_names, mod_index)

  

          log_file = compose.paths.log.log_file(

              arch, "lookaside_repo_modules_%s" % (variant.uid)

          )

-         add_modular_metadata(cr, repo, mmds, log_file)

+         add_modular_metadata(cr, repo, mod_index, log_file)

  

      compose.log_info('[DONE ] %s', msg)

  

@@ -142,7 +142,7 @@ 

      def prepare_modular_packages(self):

          for var in self.compose.all_variants.values():

              for mmd in var.arch_mmds.get(self.arch, {}).values():

-                 self.modular_packages.update(mmd.get_rpm_artifacts().dup())

+                 self.modular_packages.update(mmd.get_rpm_artifacts())

  

      def prepare_langpacks(self, arch, variant):

          if not self.compose.has_comps:
@@ -223,7 +223,7 @@ 

  

          modules = []

          for mmd in variant.arch_mmds.get(arch, {}).values():

-             modules.append("%s:%s" % (mmd.peek_name(), mmd.peek_stream()))

+             modules.append("%s:%s" % (mmd.get_module_name(), mmd.get_stream_name()))

  

          input_packages = []

          for pkg_name, pkg_arch in packages:
@@ -399,10 +399,10 @@ 

  

      for var in compose.all_variants.values():

          for mmd in var.arch_mmds.get(arch, {}).values():

-             for dep in mmd.peek_dependencies():

-                 streams = dep.peek_requires().get("platform")

+             for dep in mmd.get_dependencies():

+                 streams = dep.get_runtime_streams("platform")

                  if streams:

-                     platforms.update(streams.dup())

+                     platforms.update(streams)

  

      if len(platforms) > 1:

          raise RuntimeError("There are conflicting requests for platform.")
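In the v2 API each dependencies document is a `Modulemd.Dependencies` object whose runtime requirements are queried per module name, which is what the hunk above relies on. A small sketch of writing and then reading back a platform requirement (assuming the standard libmodulemd 2.x `Dependencies` API; values illustrative):

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

stream = Modulemd.ModuleStreamV2.new("mymod", "rolling")

# Declare a runtime requirement on platform:f30.
dep = Modulemd.Dependencies.new()
dep.add_runtime_stream("platform", "f30")
stream.add_dependency(dep)

# Read it back the same way the hunk above does.
platforms = set()
for d in stream.get_dependencies():
    platforms.update(d.get_runtime_streams("platform"))
print(platforms)  # {'f30'}
```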

@@ -38,7 +38,7 @@ 

  

          compatible_arches = pungi.arch.get_compatible_arches(arch, multilib=True)

  

-         for nsvc, mmd in variant.arch_mmds[arch].items():

+         for nsvc, module_stream in variant.arch_mmds[arch].items():

              available_rpms = sum(

                  (

                      variant.nsvc_to_pkgset[nsvc].rpms_by_arch.get(a, [])
@@ -46,7 +46,7 @@ 

                  ),

                  [],

              )

-             to_include = set(mmd.get_rpm_artifacts().get())

+             to_include = set(module_stream.get_rpm_artifacts())

Again, probably an unnecessary set()

              for rpm_obj in available_rpms:

                  if rpm_obj.nevra in to_include:

                      packages.add((rpm_obj, None))

file modified
+2 -2
@@ -229,8 +229,8 @@ 

      """

      seen_defaults = collections.defaultdict(set)

  

-     for mmddef in iter_module_defaults(path):

-         seen_defaults[mmddef.peek_module_name()].add(mmddef.peek_default_stream())

+     for module_name, defaults in iter_module_defaults(path):

+         seen_defaults[module_name].add(defaults.get_default_stream())

  

      errors = []

      for module_name, defaults in seen_defaults.items():
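The hunk is truncated before the actual conflict check. A hypothetical reconstruction of what follows (not the literal Pungi code): a module name mapped to more than one distinct default stream is reported as an error.

```python
# seen_defaults maps module name -> set of default streams seen in the files.
errors = []
for module_name, streams in seen_defaults.items():
    if len(streams) > 1:
        errors.append(
            "Module %s has conflicting defaults: %s"
            % (module_name, ", ".join(sorted(streams)))
        )
```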

@@ -20,9 +20,10 @@ 

  from kobo.threads import run_in_threads

  

  import pungi.phases.pkgset.pkgsets

+ from pungi import Modulemd

  from pungi.arch import get_valid_arches

  from pungi.wrappers.createrepo import CreaterepoWrapper

- from pungi.util import is_arch_multilib, find_old_compose, iter_module_defaults

+ from pungi.util import is_arch_multilib, find_old_compose, collect_module_defaults

  from pungi.phases.createrepo import add_modular_metadata

  

  
@@ -118,12 +119,14 @@ 

                                    update_md_path=repo_dir_global, checksum=createrepo_checksum)

      run(cmd, logfile=compose.paths.log.log_file(arch, "arch_repo"), show_cmd=True)

      # Add modulemd to the repo for all modules in all variants on this architecture.

-     mmds = list(iter_module_defaults(compose.paths.work.module_defaults_dir()))

-     for variant in compose.get_variants(arch=arch):

-         mmds.extend(variant.arch_mmds.get(arch, {}).values())

-     if mmds:

+     if Modulemd:

+         mod_index = collect_module_defaults(compose.paths.work.module_defaults_dir())

+ 

+         for variant in compose.get_variants(arch=arch):

+             for module_stream in variant.arch_mmds.get(arch, {}).values():

+                 mod_index.add_module_stream(module_stream)

          add_modular_metadata(

-             repo, repo_dir, mmds, compose.paths.log.log_file(arch, "arch_repo_modulemd")

+             repo, repo_dir, mod_index, compose.paths.log.log_file(arch, "arch_repo_modulemd")

          )

  

      compose.log_info("[DONE ] %s" % msg)

@@ -247,7 +247,9 @@ 

  

      for arch in variant.arches:

          try:

-             mmd = Modulemd.Module.new_from_file(mmds["modulemd.%s.txt" % arch])

+             mmd = Modulemd.ModuleStream.read_file(

+                 mmds["modulemd.%s.txt" % arch], strict=True

+             )

              variant.arch_mmds.setdefault(arch, {})[nsvc] = mmd

          except KeyError:

              # There is no modulemd for this arch. This could mean an arch was
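`ModuleStream.read_file` is the v2 replacement for `Module.new_from_file`; with `strict=True`, unknown keys in the document become an error instead of being silently dropped. A minimal sketch mirroring the call above (the path is illustrative):

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

# Parse a single modulemd document into a ModuleStream.
stream = Modulemd.ModuleStream.read_file("modulemd.x86_64.txt", strict=True)
print(stream.get_nsvc())  # e.g. name:stream:version:context
```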
@@ -662,11 +664,9 @@ 

                          # Not current tag, skip it

                          continue

                      for arch_modules in variant.arch_mmds.values():

-                         arch_mmd = arch_modules[nsvc]

-                         if arch_mmd:

-                             for rpm_nevra in arch_mmd.get_rpm_artifacts().get():

-                                 nevra = parse_nvra(rpm_nevra)

-                                 modular_packages.add((nevra["name"], nevra["arch"]))

+                         for rpm_nevra in arch_modules[nsvc].get_rpm_artifacts():

+                             nevra = parse_nvra(rpm_nevra)

+                             modular_packages.add((nevra["name"], nevra["arch"]))

  

              pkgset.populate(

                  compose_tag,

file modified
+30 -4
@@ -927,12 +927,38 @@ 

  

  

  def iter_module_defaults(path):

-     """Given a path to a directory with yaml files, yield each module default in there.

+     """Given a path to a directory with yaml files, yield each module default

+     in there as a pair (module_name, ModuleDefaults instance).

      """

+     # It is really tempting to merge all the module indexes into a single one

+     # and work with it. However that does not allow for detecting conflicting

+     # defaults. That should not happen in practice, but better safe than sorry.

+     # Once libmodulemd can report the error, this code can be simplified by a

+     # lot. It's implemented in

+     # https://github.com/fedora-modularity/libmodulemd/commit/3087e4a5c38a331041fec9b6b8f1a372f9ffe64d

+     # and released in 2.6.0

      for file in glob.glob(os.path.join(path, "*.yaml")):

-         for mmddef in Modulemd.objects_from_file(file):

-             if isinstance(mmddef, Modulemd.Defaults):

-                 yield mmddef

+         index = Modulemd.ModuleIndex()

+         index.update_from_file(file, strict=False)

+         for module_name in index.get_module_names():

+             yield module_name, index.get_module(module_name).get_defaults()

+ 

+ 

+ def collect_module_defaults(defaults_dir, modules_to_load=None, mod_index=None):

+     """Load module defaults into index.

+ 

+     If `modules_to_load` is passed in, it should be a set of module names. Only

+     defaults for these modules will be loaded.

+ 

+     If `mod_index` is passed in, it will be updated and returned. If it was

+     not, a new ModuleIndex will be created and returned.

+     """

+     mod_index = mod_index or Modulemd.ModuleIndex()

+     for module_name, defaults in iter_module_defaults(defaults_dir):

+         if not modules_to_load or module_name in modules_to_load:

+             mod_index.add_defaults(defaults)

+ 

+     return mod_index

  

  

  def load_config(file_path, defaults={}):
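Hypothetical usage of the two helpers above: build an index holding only the defaults for modules actually present in a compose, then inspect it (the directory path and module names are made up for illustration):

```python
module_names = {"httpd", "nodejs"}
mod_index = collect_module_defaults("/path/to/defaults", modules_to_load=module_names)
for name in mod_index.get_module_names():
    defaults = mod_index.get_module(name).get_defaults()
    print(name, defaults.get_default_stream())
```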

file modified
+13 -19
@@ -86,36 +86,30 @@ 

              # No support for modules

              return

          name, stream, version, context = nsvc.split(":")

-         mmd = Modulemd.Module()

-         mmd.set_mdversion(2)

-         mmd.set_name(name)

-         mmd.set_stream(stream)

-         mmd.set_version(int(version))

-         mmd.set_context(context)

-         mmd.set_summary("foo")

-         mmd.set_description("foo")

-         licenses = Modulemd.SimpleSet()

-         licenses.add("GPL")

-         mmd.set_module_licenses(licenses)

+         module_stream = Modulemd.ModuleStreamV2.new(name, stream)

+         module_stream.set_version(int(version))

+         module_stream.set_context(context)

+         module_stream.set_summary("foo")

+         module_stream.set_description("foo")

+         module_stream.add_module_license("GPL")

  

          if rpm_nvrs:

-             artifacts = Modulemd.SimpleSet()

              for rpm_nvr in rpm_nvrs:

-                 artifacts.add(rpm_nvr)

                  rpm_name = parse_nvr(rpm_nvr)["name"]

-                 component = Modulemd.ComponentRpm()

+                 component = Modulemd.ComponentRpm.new(rpm_nvr)

                  component.set_name(rpm_name)

                  component.set_rationale("Needed for test")

-                 mmd.add_rpm_component(component)

-             if with_artifacts:

-                 mmd.set_rpm_artifacts(artifacts)

+                 module_stream.add_component(component)

+                 if with_artifacts:

+                     module_stream.add_rpm_artifact(rpm_nvr)

  

          if self.modules is None:

              self.modules = []

          self.modules.append(":".join([name, stream, version]))

          if mmd_arch:

-             self.arch_mmds.setdefault(mmd_arch, {})[mmd.dup_nsvc()] = mmd

-         return mmd

+             nsvc = module_stream.get_nsvc()

Can you explain why you're using get_nsvc() here and not get_NSVCA()? I worry this could bite us in the future if we ever decided to support multiple architectures in the same repo. However, I don't have a good sense of the whole process to see if this is really an issue or not.

There's no particular reason. It's a reasonable change, but it needs to happen in a few other places as well. It can be done separately from this PR. I'll open a ticket.
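For reference, a sketch of the difference under discussion: `get_nsvc()` yields name:stream:version:context, while `get_NSVCA()` also appends the arch when one is set (values illustrative).

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd

stream = Modulemd.ModuleStreamV2.new("mymod", "rolling")
stream.set_version(20190101)
stream.set_context("deadbeef")
stream.set_arch("x86_64")

print(stream.get_nsvc())   # mymod:rolling:20190101:deadbeef
print(stream.get_NSVCA())  # mymod:rolling:20190101:deadbeef:x86_64
```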

+             self.arch_mmds.setdefault(mmd_arch, {})[nsvc] = module_stream

+         return module_stream

  

  

  class IterableMock(mock.Mock):

file modified
+26 -20
@@ -118,6 +118,24 @@ 

               mock.call((compose, None, compose.variants['Everything'], 'srpm', phase.modules_metadata))])

  

  

+ def make_mocked_modifyrepo_cmd(tc, module_artifacts):

+     def mocked_modifyrepo_cmd(repodir, mmd_path, **kwargs):

+         mod_index = Modulemd.ModuleIndex.new()

+         mod_index.update_from_file(mmd_path, strict=True)

+ 

+         tc.assertEqual(len(mod_index.get_module_names()), 1)

+ 

+         module = mod_index.get_module("test")

+         module_streams = module.get_all_streams()

+         tc.assertEqual(len(module_streams), len(module_artifacts))

+         for ms in module_streams:

+             tc.assertIn(ms.get_stream_name(), module_artifacts)

+             tc.assertItemsEqual(

+                 ms.get_rpm_artifacts(), module_artifacts[ms.get_stream_name()],

+             )

+     return mocked_modifyrepo_cmd

+ 

+ 

  class TestCreateVariantRepo(PungiTestCase):

  

      @mock.patch('pungi.phases.createrepo.run')
@@ -742,17 +760,10 @@ 

          variant.arch_mmds["x86_64"]["test:f28:1:2017"] = variant.add_fake_module(

              "test:f28:1:2017", rpm_nvrs=["pkg-0:2.0.0-1.x86_64"])

  

-         def mocked_modifyrepo_cmd(repodir, mmd_path, **kwargs):

-             modules = Modulemd.Module.new_all_from_file(mmd_path)

-             self.assertEqual(len(modules), 2)

-             self.assertItemsEqual([m.get_stream() for m in modules],

-                                   ["f27", "f28"])

-             self.assertItemsEqual(

-                 [m.get_rpm_artifacts().get() for m in modules],

-                 [[], []])

- 

          repo = CreaterepoWrapperCls.return_value

-         repo.get_modifyrepo_cmd.side_effect = mocked_modifyrepo_cmd

+         repo.get_modifyrepo_cmd.side_effect = make_mocked_modifyrepo_cmd(

+             self, {"f27": [], "f28": []}

+         )

          copy_fixture('server-rpms.json', compose.paths.compose.metadata('rpms.json'))

  

          repodata_dir = os.path.join(
@@ -796,17 +807,12 @@ 

              "test:f27:2018:cafe": "tag-2",

          }

  

-         def mocked_modifyrepo_cmd(repodir, mmd_path, **kwargs):

-             modules = Modulemd.Module.new_all_from_file(mmd_path)

-             self.assertEqual(len(modules), 2)

-             self.assertItemsEqual([m.get_stream() for m in modules],

-                                   ["f27", "f28"])

-             self.assertItemsEqual(

-                 [m.get_rpm_artifacts().get() for m in modules],

-                 [["bash-0:4.3.30-2.fc21.x86_64"], ["pkg-0:2.0.0-1.x86_64"]])

- 

          repo = CreaterepoWrapperCls.return_value

-         repo.get_modifyrepo_cmd.side_effect = mocked_modifyrepo_cmd

+         repo.get_modifyrepo_cmd.side_effect = make_mocked_modifyrepo_cmd(

+             self,

+             {"f27": ["bash-0:4.3.30-2.fc21.x86_64"], "f28": ["pkg-0:2.0.0-1.x86_64"]},

+         )

+ 

          copy_fixture('server-rpms.json', compose.paths.compose.metadata('rpms.json'))

  

          repodata_dir = os.path.join(

@@ -209,39 +209,26 @@ 

      def get_name(self):

          return self.name

  

-     def peek_name(self):

+     def get_module_name(self):

          return self.name

  

-     def peek_stream(self):

+     def get_stream_name(self):

          return self.stream

  

-     def peek_version(self):

+     def get_version(self):

          return self.version

  

-     def peek_context(self):

+     def get_context(self):

          return self.context

  

-     def peek_dependencies(self):

-         return [

-             mock.Mock(

-                 peek_requires=mock.Mock(

-                     return_value={

-                         "platform": mock.Mock(

-                             dup=mock.Mock(return_value=[self.platform])

-                         )

-                     }

-                 )

-             )

-         ]

- 

-     def copy(self):

-         return self

- 

-     def set_arch(self, arch):

-         pass

+     def get_dependencies(self):

+         def get_runtime_streams(platform):

+             assert platform == "platform"

+             return [self.platform]

+         return [mock.Mock(get_runtime_streams=get_runtime_streams)]

  

      def get_rpm_artifacts(self):

-         return mock.Mock(dup=mock.Mock(return_value=self.rpms))

+         return self.rpms

  

  

  class HelperMixin(object):
@@ -308,10 +295,10 @@ 

          self.compose.has_comps = False

          self.compose.variants["Server"].arch_mmds["x86_64"] = {

              "mod:master": mock.Mock(

-                 peek_name=mock.Mock(return_value="mod"),

-                 peek_stream=mock.Mock(return_value="master"),

-                 peek_version=mock.Mock(return_value="ver"),

-                 peek_context=mock.Mock(return_value="ctx"),

+                 get_module_name=mock.Mock(return_value="mod"),

+                 get_stream_name=mock.Mock(return_value="master"),

+                 get_version=mock.Mock(return_value="ver"),

+                 get_context=mock.Mock(return_value="ctx"),

              )

          }

          po.return_value = ([], ["m1"])
@@ -356,16 +343,16 @@ 

          self.compose.has_comps = False

          self.compose.variants["Server"].arch_mmds["x86_64"] = {

              "mod:master": mock.Mock(

-                 peek_name=mock.Mock(return_value="mod"),

-                 peek_stream=mock.Mock(return_value="master"),

-                 peek_version=mock.Mock(return_value="ver"),

-                 peek_context=mock.Mock(return_value="ctx"),

+                 get_module_name=mock.Mock(return_value="mod"),

+                 get_stream_name=mock.Mock(return_value="master"),

+                 get_version=mock.Mock(return_value="ver"),

+                 get_context=mock.Mock(return_value="ctx"),

              ),

              "mod-devel:master": mock.Mock(

-                 peek_name=mock.Mock(return_value="mod-devel"),

-                 peek_stream=mock.Mock(return_value="master"),

-                 peek_version=mock.Mock(return_value="ver"),

-                 peek_context=mock.Mock(return_value="ctx"),

+                 get_module_name=mock.Mock(return_value="mod-devel"),

+                 get_stream_name=mock.Mock(return_value="master"),

+                 get_version=mock.Mock(return_value="ver"),

+                 get_context=mock.Mock(return_value="ctx"),

              ),

          }

          po.return_value = ([("p-1-1", "x86_64", frozenset())], ["m1"])

file modified
+6 -6
@@ -542,13 +542,13 @@ 

      def _write_defaults(self, defs):

          for mod_name, streams in defs.items():

              for stream in streams:

-                 mmddef = Modulemd.Defaults.new()

-                 mmddef.set_version(1)

-                 mmddef.set_module_name(mod_name)

+                 mod_index = Modulemd.ModuleIndex.new()

+                 mmddef = Modulemd.DefaultsV1.new(mod_name)

                  mmddef.set_default_stream(stream)

-                 mmddef.dump(

-                     os.path.join(self.topdir, "%s-%s.yaml" % (mod_name, stream))

-                 )

+                 mod_index.add_defaults(mmddef)

+                 filename = "%s-%s.yaml" % (mod_name, stream)

+                 with open(os.path.join(self.topdir, filename), "w") as f:

+                     f.write(mod_index.dump_to_string())

  

      def test_valid_files(self):

          self._write_defaults({"httpd": ["1"], "python": ["3.6"]})

@@ -675,7 +675,7 @@ 

  

  

  class MockModule(object):

-     def __init__(self, path):

+     def __init__(self, path, strict=True):

          self.path = path

  

      def __repr__(self):
@@ -685,7 +685,7 @@ 

          return self.path == other.path

  

  

- @mock.patch("pungi.Modulemd.Module.new_from_file", new=MockModule)

+ @mock.patch("pungi.Modulemd.ModuleStream.read_file", new=MockModule)

  @unittest.skipIf(Modulemd is None, "Skipping tests, no module support")

  class TestAddModuleToVariant(unittest.TestCase):

  

I think we probably want to be using strict=True here. DNF uses strict=False as a preventative measure to avoid failing if we extend the format, but I think when we're generating the repodata, we probably don't want to ignore unknown keys. It implies that we've loaded an older version of libmodulemd to create the repodata than we used when building the module in MBS.

Is there a specific case you're worried about failing on here?

https://github.com/fedora-modularity/libmodulemd/commit/3087e4a5c38a331041fec9b6b8f1a372f9ffe64d (included in libmodulemd 2.6.0) now offers ModuleIndexMerger.resolve_ext(strict_default_streams=True) which will do what you're asking for here.

As above, I think we want to be strict when generating the repositories.
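The merger API referenced above can be sketched like this (requires libmodulemd >= 2.6 for `resolve_ext`; file names are illustrative):

```python
import gi
gi.require_version("Modulemd", "2.0")
from gi.repository import Modulemd


def load_index(path):
    idx = Modulemd.ModuleIndex.new()
    # strict=True marks documents with unknown keys as failures
    # instead of quietly skipping the unrecognized data.
    idx.update_from_file(path, strict=True)
    return idx


merger = Modulemd.ModuleIndexMerger.new()
merger.associate_index(load_index("defaults-a.yaml"), 0)  # priority 0
merger.associate_index(load_index("defaults-b.yaml"), 0)

# resolve_ext(strict_default_streams=True) raises an error on conflicting
# default streams rather than resolving the conflict by unsetting the default.
merged = merger.resolve_ext(strict_default_streams=True)
```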

Might want to rephrase this as "Returns a ModuleIndex. If mod_index was passed in, it will be updated and returned. If it was not, a new ModuleIndex will be instantiated and returned."

This logic is hard to follow. I'd recommend renaming module_names or making the docstring more clear that module_names means "collect only defaults for modules whose name matches one in this list".

For the most part, I think this looks good. Most of my comments are minor.

I was thinking that we don't want to prohibit old modules from being included. It would protect against backwards incompatible changes, which I hope should not happen. I'll switch the default.

Yup, that is great. Let's give it a few weeks for the new libmodulemd to get released and pushed as an update, so that we don't have to coordinate the releases. There is no pressure. I'll update the comment to include the links.

rebased onto 34a380a85b6160d3006736a04dd60188bb33b896 (5 years ago)

> I was thinking that we don't want to prohibit old modules from being included. It would protect against backwards incompatible changes, which I hope should not happen. I'll switch the default.

I don't understand your statement here. How would it prohibit old modules from being included? The strictness function here is exclusively about how it should handle module metadata that contains YAML data that libmodulemd doesn't understand. There are only two ways this can happen:

  1. The YAML was generated by a newer version of libmodulemd than pungi is using which has new functionality. In this case, we should make sure that we update the version pungi is linking against.
  2. The YAML is malformed. In this case, it should be rejected.

> it's not only about duplicates, but set gives us O(1) lookup instead of O(n).

I hadn't considered that usage. Makes sense, then.

(Strictly speaking, it's maintained as a set internally, but the Python GObject bindings don't support the Python set() type.)

> How would it prohibit old modules from being included?

The YAML was generated with older libmodulemd, and newer libmodulemd no longer recognizes some keys in there. Hopefully this should not happen.

> How would it prohibit old modules from being included?
>
> The YAML was generated with older libmodulemd, and newer libmodulemd no longer recognizes some keys in there. Hopefully this should not happen.

This would be a serious regression in libmodulemd. If we made a backwards-incompatible change like that, it would necessitate a change to the spec and a new ModuleStreamV3 object. Any other occurrence of this is a severe bug and would need to be fixed immediately (and would not be Pungi's responsibility to account for).

I didn't find any issues with the code.

rebased onto 61e3cb0 (5 years ago)

Pull-Request has been merged by lsedlar (5 years ago)