#26 Prevent errors by using sed, and catch new polib errors
Opened 3 years ago by jibecfed. Closed 3 years ago by jibecfed.

file modified
+1
@@ -5,3 +5,4 @@ 

  website/public/

  website/static/*

  srpms_*.lst

+ log.*

file modified
+14 -9
@@ -120,12 +120,14 @@ 

          metadata = dict()

          try:

              metadata = polib.pofile(file).metadata

-         except UnicodeDecodeError:

-             # encoding error, to investigate before using it in TM

-             metadata["Language"] = "error-unicode"

          except OSError:

              # maybe a polib bug? to investigate before using it in TM

              metadata["Language"] = "error-os"

+         except TypeError:

+             # e.g. '>' not supported between instances of 'str' and 'int'

+             metadata["Language"] = "error-type"

+         except UnicodeDecodeError:

+             # encoding error, to investigate before using it in TM

+             metadata["Language"] = "error-unicode"

  

          if "Language" not in metadata.keys():

              metadata["Language"] = "zzz_null"
@@ -154,9 +156,7 @@ 

  

          results[metadata.get("Language")] = language

  

-     results = dict(sorted(results.items(), key=lambda item: item[0]))

- 

-     return results

+     return dict(sorted(results.items(), key=lambda item: item[0]))

  

  

  def describe(lang_folder):
@@ -189,8 +189,11 @@ 

  

      log_file = os.path.join(results_folder, "build_language_list.log")

      file_object = open(log_file, "w")

- 

+     count = 0

+     total = len(packages)

      for package in packages:

+         count += 1

+         log.debug("{c}/{t}".format(c=count, t=total))

          discovery_file = os.path.join(package_folder, package, "discover.json")

  

          with open(discovery_file, "r") as read_file:
@@ -212,13 +215,15 @@ 

                  except UnicodeDecodeError:

                      # encoding error, to investigate before using it in TM

                      error = "error-unicode"

+                 except TypeError:

+                     # e.g. '>' not supported between instances of 'str' and 'int'

+                     error = "error-type"

                  except OSError:

                      # maybe a polib bug? to investigate before using it in TM

                      error = "error-os"

  

                  lang, decision = choose_lang(lang_code, metadata, error)

  

-                 log = ",".join(

+                 debug = ",".join(

                      [

                          po,

                          lang_code,
@@ -228,7 +233,7 @@ 

                          str(decision),

                      ]

                  )

-                 file_object.write(log + "\n")

+                 file_object.write(debug + "\n")

  

                  lang_result = langs.get(lang, dict())

                  po_results = lang_result.get("po", list())

file modified
+30 -19
@@ -6,6 +6,8 @@ 

  import json

  import os

  import shutil

+ import subprocess

+ 

  import polib

  import logging

  
@@ -71,30 +73,31 @@ 

      for package in sorted(packages):

          count += 1

          log.info(" {c}/{t} - {p}".format(c=count, t=len(packages), p=package))

-         with open(os.path.join(packages_folder, package, "discover.json"), "r") as f:

-             discoveries = json.load(f)

  

          src_folder = os.path.join(packages_folder, package)

          stats_file = os.path.join(packages_stats_folder, package + ".json")

  

-         if os.path.isfile(stats_file):

-             continue

+         if os.path.isfile(stats_file) is False:

+             with open(os.path.join(packages_folder, package, "discover.json"), "r") as f:

+                 discoveries = json.load(f)

  

-         results = dict()

-         for discover in discoveries:

-             files = glob.glob(os.path.join(src_folder, discover["filemask"]))

+             results = dict()

+             for discover in discoveries:

+                 files = glob.glob(os.path.join(src_folder, discover["filemask"]))

  

-             if discover["file_format"] == "po":

-                 results[discover["filemask"]] = get_po_translation_level(

-                     files, stats_file

-                 )

+                 if discover["file_format"] == "po":

+                     results[discover["filemask"]] = get_po_translation_level(

+                         files, stats_file

+                     )

  

-         if len(results) > 0:

-             distribution_stats = extract_release_stats(distribution_stats, results)

+             if len(results) > 0:

+                 with open(stats_file, "w") as f:

+                     json.dump(results, f, indent=2)

+         else:

+             with open(stats_file, "r") as f:

+                 results = json.load(f)

  

-         if len(results) > 0:

-             with open(stats_file, "w") as f:

-                 json.dump(results, f, indent=2)

+         distribution_stats = extract_release_stats(distribution_stats, results)

  

      log.info("Storing distribution stats")

      if not os.path.exists(distribution_stats_folder):
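
Note: the restructuring above implements a simple compute-or-load cache keyed on the per-package stats file. The pattern in isolation, as a sketch (cached_stats and compute_stats are hypothetical stand-ins for the loop body):

```python
import json
import os

def cached_stats(stats_file, compute_stats):
    """Load cached results when present, otherwise compute and store them.

    compute_stats is a hypothetical callable standing in for the
    per-package po-file analysis done in the loop above.
    """
    if not os.path.isfile(stats_file):
        results = compute_stats()
        if len(results) > 0:
            with open(stats_file, "w") as f:
                json.dump(results, f, indent=2)
        return results
    with open(stats_file, "r") as f:
        return json.load(f)
```
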
@@ -137,6 +140,11 @@ 

      stats = dict()

  

      for file in files:

+         # remove non standard comments

+         # taken from: https://github.com/translate/translate/blob/master/tools/pocommentclean

+         command = ["sed", "-i", "/^#$/d;/^#[^\:\~,\.]/d", file]

+         subprocess.run(command, check=True, capture_output=True)

+ 

          try:

              stat = calcstats(file)

          except Exception as e:
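
Note: that sed expression comes from pocommentclean: it deletes bare `#` lines and translator comments while keeping the standard `#.`, `#:`, `#,` and `#~` lines. A pure-Python sketch of the same cleanup (clean_po_comments is a hypothetical helper; assumes UTF-8 input):

```python
import re

# lines the sed call removes: a bare "#", or a translator comment
# ("#" followed by anything other than ':', '~', ',' or '.')
DROP = re.compile(r"^#$|^#[^:~,.]")

def clean_po_comments(path):
    """Strip non-standard po comments in place (assumes UTF-8 input)."""
    with open(path, encoding="utf-8") as fh:
        kept = [line for line in fh if not DROP.match(line)]
    with open(path, "w", encoding="utf-8") as fh:
        fh.writelines(kept)
```
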
@@ -168,12 +176,15 @@ 

      metadata = dict()

      try:

          metadata = polib.pofile(file).metadata

-     except UnicodeDecodeError:

-         # encoding error, to investigate before using it in TM

-         metadata["Language"] = "error-unicode"

      except OSError:

          # maybe a polib bug? to investigate before using it in TM

          metadata["Language"] = "error-os"

+     except UnicodeDecodeError:

+         # encoding error, to investigate before using it in TM

+         metadata["Language"] = "error-unicode"

+     except TypeError:

+         # TypeError: '>' not supported between instances of 'str' and 'int'

+         metadata["Language"] = "error-valuerror"

  

      team = "Unknown..."

      try:

file modified
+126 -105
@@ -10,11 +10,6 @@ 

  import tempfile

  import logging

  

- from io import BytesIO

- from translate.convert import po2tmx

- from translate.storage import factory, po

- from translate.tools import poterminology

- 

  

  def main():

      """Handle params"""
@@ -41,7 +36,7 @@ 

  

      loglevel = logging.INFO

      if args.verbose:

-       loglevel = logging.DEBUG

+         loglevel = logging.DEBUG

      logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=loglevel)

      log = logging.getLogger("buildTm")

  
@@ -69,7 +64,7 @@ 

      for lang in sorted(langs):

          lang_code = lang[: -len(".json")]

  

-         log.info(" {l}".format(l=lang_code))

+         log.info("Processing {l}".format(l=lang_code))

  

          with open(os.path.join(lang_path, lang), "r") as read_file:

              files = json.load(read_file)["po"]
@@ -78,18 +73,19 @@ 

          compendium_file = os.path.join(

              os.path.dirname(os.path.abspath(__file__)), compendium_file

          )

-         if not os.path.isfile(compendium_file):

-             try:

-                 process_compendium(files, compendium_file, debug_folder)

-             except Exception as e:

-                 log.error(

-                     " Compendium generation triggered an {t} exception: {e}".format(

-                         t=type(e).__name__, e=e

-                     )

-                 )

+         compendium_archive = compendium_file + ".gz"

+         if os.path.isfile(compendium_file) is False and os.path.isfile(compendium_archive) is False:

+             log.info("Compendium generation")

+             process_compendium(files, compendium_file, debug_folder)

+             # remove non standard comments

+             # taken from: https://github.com/translate/translate/blob/master/tools/pocommentclean

+             command = ["sed", "-i", "/^#$/d;/^#[^\:\~,\.]/d", compendium_file]

+             subprocess.run(command, check=True, capture_output=True)

  

          tmx_file = os.path.join(tm_folder, lang_code + ".tmx")

-         if not os.path.isfile(tmx_file):

+         tmx_archive = tmx_file + ".gz"

+         if os.path.isfile(tmx_file) is False and os.path.isfile(tmx_archive) is False:

+             log.info("TMX generation")

              try:

                  process_tmx(lang_code, compendium_file, tmx_file)

              except Exception as e:
@@ -100,7 +96,9 @@ 

                  )

  

          terminology_file = os.path.join(tm_folder, lang_code + ".terminology.po")

-         if not os.path.isfile(terminology_file):

+         terminology_archive = terminology_file + ".gz"

+         if os.path.isfile(terminology_file) is False and os.path.isfile(terminology_archive) is False:

+             log.info("Terminology generation")

              try:

                  process_terminology(compendium_file, terminology_file)

              except Exception as e:
@@ -110,13 +108,21 @@ 

                      )

                  )

  

+         if args.compress:

+             if os.path.isfile(compendium_file):

+                 compress(compendium_file, compendium_archive)

+ 

+             if os.path.isfile(tmx_file):

+                 compress(tmx_file, tmx_archive)

+ 

+             if os.path.isfile(terminology_file):

+                 compress(terminology_file, terminology_archive)

+ 

+     log.info("All languages are processed")

+ 

      log.info("Detecting missing files")

      for lang in sorted(langs):

-         check_lang(lang[: -len(".json")], tm_folder)

- 

-     if args.compress:

-         log.info("Compressing files")

-         compress(tm_folder)

+         check_lang(lang[: -len(".json")], tm_folder, args.compress)

  

  

  def process_compendium(langfiles, dest, debug_folder):
@@ -129,6 +135,7 @@ 

      count = 0

  

      with tempfile.TemporaryDirectory(prefix="l10n-tm") as tmp:

+ 

          for i in pofiles:

              try:

                  command = [
@@ -153,35 +160,24 @@ 

                      subprocess.run(command, check=True, cwd=tmp, capture_output=True)

                  except subprocess.CalledProcessError as e:

                      debug_filename = "tm-msguniq-{lang}-{name}".format(lang=dest.split("/")[-1], name=count.__str__())

-                     log.error(" msguniq error with {i} a copy of this file is into {d} as {n}".format(i=i, e=e.output, d=debug_folder, n=debug_filename))

+                     log.error(" msguniq error with {i} a copy of this file is into {d} as {n}".format(i=i, e=e.output,

+                                                                                                       d=debug_folder,

+                                                                                                       n=debug_filename))

                      shutil.copyfile(i, os.path.join(debug_folder, debug_filename))

  

              count += 1

  

-         # search every file that were successful

-         search_guilty_file(tmp, dest, debug_folder)

- 

+         all_files = [f for f in os.listdir(tmp) if os.path.isfile(os.path.join(tmp, f))]

+         if len(all_files) == 1:

+             shutil.copyfile(os.path.join(tmp, all_files[0]), dest)

+         else:

+             msgcat_recursive(dest, tmp, debug_folder, all_files, list(), list())

  

- def search_guilty_file(path, dest, debug_folder):

-     log = logging.getLogger("buildTm.process_compendium.guilty")

-     all_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]

  

-     try_msgcat(all_files, dest, path)

- 

-     guilty_file = None

-     while os.path.isfile(dest) is False:

-         guilty_file = all_files.pop()

-         try_msgcat(all_files, dest, path)

- 

-     if guilty_file is not None:

-         debug_filename = "tm-msgcat-{lang}-{name}".format(lang=dest.split("/")[-1], name=guilty_file)

-         log.error("the file {f} raised error with msgcat, a copy of this file is into {d} as {n}".format(f=guilty_file, d=debug_folder, n=debug_filename))

-         shutil.move(os.path.join(path, guilty_file), os.path.join(debug_folder, debug_filename))

-         os.remove(dest)

-         search_guilty_file(path, dest, debug_folder)

- 

- 

- def try_msgcat(files, destination, cwd):

+ def msgcat(files, destination, path, doubt=False):

+     """ Call the msgcat command on a list of po files

+     Only print output if a bug is suspected """

+     log = logging.getLogger("buildTm.msgcat")

      command = [

                    "msgcat",

                    "--force-po",
@@ -191,60 +187,87 @@ 

                ] + files

  

      try:

-         subprocess.run(command, check=True, cwd=cwd, capture_output=True)

-     except subprocess.CalledProcessError:

+         subprocess.run(command, check=True, cwd=path, capture_output=True)

+     except subprocess.CalledProcessError as e:

          # msgcat often raises an exception but continues its processing

+         if doubt is not False:

+             log.error("Error with file {d}: {e}".format(d=doubt, e=e.stderr.decode('utf8')))

          pass

  

  

+ def store_debug_file(path, name, file, debug_folder):

+     """ Move the temporary move file in debug folder """

+     log = logging.getLogger("buildTm.store_debug_file")

+     target = os.path.join(debug_folder, "{n}-{f}".format(n=name, f=file))

+     log.error("The file {f} were moved into {t}".format(f=file, t=target))

+     shutil.move(os.path.join(path, file), target)

+ 

+ 

+ def msgcat_recursive(destination, path, debug_folder, backlog, ongoing, ok):

+     """ Try to call msgcat, retry with half of the files if it fails """

+     log = logging.getLogger("buildTm.msgcat_recursive")

+     doubt = False

+     log.debug("backlog={b}, ongoing={o}, ok={ok}".format(b=len(backlog), o=len(ongoing), ok=len(ok)))

+     if len(ongoing) == 0:

+         ongoing = backlog.copy()

+         backlog = []

+ 

+     # we can't use msgcat with one single file

+     if len(ongoing) == 1:

+         doubt = ongoing.copy().pop()

+         ongoing.append(ok[0])

+ 

+     msgcat(ongoing, destination, path, doubt)

+ 

+     if os.path.isfile(destination) is True:

+         processed = len(ongoing)

+         ok += ongoing

+         # if we added one item from 'ok', we want to make sure it's not duplicated

+         ok = list(set(ok))

+         ongoing = []

+         if len(ok) == processed and len(backlog) == 0:

+             log.debug("First generation worked")

+         else:

+             os.remove(destination)

+     else:

+         if doubt is not False:

+             log.debug("This file raised a msgcat bug: {f}".format(f=doubt))

+             store_debug_file(path, "tm-msgcat-" + destination.split("/")[-1], doubt, debug_folder)

+             ongoing = []

+ 

+         half = int(len(ongoing) / 2)

+         backlog += ongoing[half:]

+         ongoing = ongoing[:half]

+ 

+     if len(backlog) + len(ongoing) > 0:

+         msgcat_recursive(destination, path, debug_folder, backlog, ongoing, ok)

+     else:

+         if os.path.isfile(destination) is False:

+             log.debug("Generating remaining files")

+             msgcat(ok, destination, path)

+ 

+         if os.path.isfile(destination) is False:

+             log.error("weird, some files raising bugs were missed?")

+             msgcat_recursive(destination, path, debug_folder, ok, list(), list())

+ 

+ 
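
Note: msgcat_recursive is a divide-and-conquer search for inputs that crash msgcat: halve the batch on failure until a lone suspect remains (which the code pads with a known-good file, since msgcat needs at least two inputs). The core idea in isolation, as a sketch (find_guilty and works are hypothetical, not part of this change):

```python
def find_guilty(files, works):
    """Bisection sketch: return the files for which works(...) fails.

    works(subset) is a hypothetical predicate standing in for a
    successful msgcat run on that subset of po files.
    """
    if works(files):
        return []
    if len(files) == 1:
        return list(files)  # a single failing file is the culprit
    half = len(files) // 2
    return find_guilty(files[:half], works) + find_guilty(files[half:], works)
```
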

  def process_tmx(lang, source, dest):

      """ Generate a translation memory from a po file """

  

-     outputfile = po2tmx.tmxmultifile(dest)

-     po2tmx.convertpo(

-         inputfile=BytesIO(open(source, "r").read().encode()),

-         outputfile=outputfile,

-         templatefile=None,

-         sourcelanguage="en",

-         targetlanguage=lang,

-         comment="source",

-     )

- 

-     outputfile.tmxfile.savefile(dest)

+     command = ["po2tmx", "--language=" + lang, "--progress=none", source, "--output=" + dest]

+     subprocess.run(command, check=True, capture_output=True)

  

  

  def process_terminology(source, dest):

      """ Generate a termonology from a po file """

  

-     extractor = poterminology.TerminologyExtractor()

-     options = {

-         "inputmin": "1",

-         "fullmsgmin": "1",

-         "substrmin": "2",

-         "locmin": "2",

-         "nonstopmin": 1,

-         "sortorders": ["frequency", "dictionary", "length"],

-         "output": dest,

-     }

- 

-     with open(source, "rb") as fh:

-         inputfile = factory.getobject(fh)

- 

-     extractor.processunits(inputfile.units, source)

-     terms = extractor.extract_terms()

- 

-     termfile = po.pofile()

-     termitems = extractor.filter_terms(

-         terms, nonstopmin=options["nonstopmin"], sortorders=options["sortorders"]

-     )

-     for count, unit in termitems:

-         termfile.units.append(unit)

- 

-     with open(options["output"], "wb") as fh:

-         termfile.serialize(fh)

+     command = ["poterminology", "--ignore-case", "--fold-titlecase",

+                "--inputs-needed", "1",

+                "--progress=none", source, "--output=" + dest]

+     subprocess.run(command, check=True, capture_output=True)
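
Note: with check=True, a failing po2tmx or poterminology run surfaces as subprocess.CalledProcessError, which the callers already catch and log. A usage sketch (the file names are hypothetical; assumes translate-toolkit is installed):

```python
import subprocess

try:
    subprocess.run(
        ["po2tmx", "--language=fr", "--progress=none",
         "compendium.fr.po", "--output=fr.tmx"],
        check=True,
        capture_output=True,
    )
except subprocess.CalledProcessError as e:
    print(e.stderr.decode("utf8"))
```
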

  

  

- def check_lang(lang, tm_folder):

+ def check_lang(lang, tm_folder, compress):

      """ Check if expected files were generated """

      log = logging.getLogger("buildTm.check_lang")

  
@@ -252,33 +275,31 @@ 

      tmx_file = os.path.join(tm_folder, lang + ".tmx")

      terminology_file = os.path.join(tm_folder, lang + ".terminology.po")

  

-     if not os.path.isfile(compendium_file):

-         log.warning(" {l}-compendium is missing".format(l=lang))

+     if compress is True:

+         compendium_file += ".gz"

+         tmx_file += ".gz"

+         terminology_file += ".gz"

  

-     if not os.path.isfile(tmx_file):

-         log.warning(" {l}-tmx is missing".format(l=lang))

+     if os.path.isfile(compendium_file) is False:

+         log.warning("{l}-compendium is missing".format(l=lang))

  

-     if not os.path.isfile(terminology_file):

-         log.warning(" {l}-terminology is missing".format(l=lang))

+     if os.path.isfile(tmx_file) is False:

+         log.warning("{l}-tmx is missing".format(l=lang))

  

+     if os.path.isfile(terminology_file) is False:

+         log.warning("{l}-terminology is missing".format(l=lang))

  

- def compress(folder):

+ 

+ def compress(source, archive):

      """ Compress files uzing gzip """

      log = logging.getLogger("buildTm.compress")

  

-     files = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]

- 

-     for file in sorted(files):

-         if file.endswith(".gz"):

-             continue

- 

-         dest = file + ".gz"

-         if os.path.isfile(os.path.join(folder, dest)):

-             continue

+     log.info("Compressing")

+     with open(source, "rb") as file_in:

+         with gzip.open(archive, "wb") as file_out:

+             file_out.writelines(file_in)

  

-         with open(os.path.join(folder, file), "rb") as file_in:

-             with gzip.open(os.path.join(folder, dest), "wb") as file_out:

-                 file_out.writelines(file_in)

+     os.remove(source)

  

  

  if __name__ == "__main__":

file modified
+39
@@ -166,6 +166,13 @@ 

  

          generate_static_pages_packages(args.results, code, content, dest_file)

  

+     log.info("Generating indexes")

+     dest_file = os.path.join(static_langs_folder, "_index.md")

+     generate_language_index(args.results, dest_file)

+ 

+     dest_file = os.path.join(static_pkgs_folder, "_index.md")

+     generate_package_index(args.results, dest_file)

+ 

      log.info("Copy translation memories")

      languages = [

          f for f in os.listdir(tm_folder) if os.path.isfile(os.path.join(tm_folder, f))
@@ -383,6 +390,38 @@ 

          write_out.write(outputText)

  

  

+ def generate_language_index(distribution, dest_file):

+     log = logging.getLogger("buildWebsite.generate_language_index")

+     data = dict()

+     data["distribution"] = distribution

+     data["now"] = datetime.datetime.utcnow()

+ 

+     templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")

+     templateEnv = jinja2.Environment(loader=templateLoader, undefined=jinja2.Undefined)

+     TEMPLATE_FILE = "_index.language.md"

+     template = templateEnv.get_template(TEMPLATE_FILE)

+     outputText = template.render(data)

+ 

+     with open(dest_file, "w") as write_out:

+         write_out.write(outputText)

+ 

+ 

+ def generate_package_index(distribution, dest_file):

+     log = logging.getLogger("buildWebsite.generate_package_index")

+     data = dict()

+     data["distribution"] = distribution

+     data["now"] = datetime.datetime.utcnow()

+ 

+     templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")

+     templateEnv = jinja2.Environment(loader=templateLoader, undefined=jinja2.Undefined)

+     TEMPLATE_FILE = "_index.package.md"

+     template = templateEnv.get_template(TEMPLATE_FILE)

+     outputText = template.render(data)

+ 

+     with open(dest_file, "w") as write_out:

+         write_out.write(outputText)

+ 

+ 
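
Note: generate_language_index and generate_package_index differ only in the template name, so they could share one helper. A consolidation sketch (generate_index is a hypothetical helper, not in this change):

```python
import datetime
import jinja2

def generate_index(distribution, dest_file, template_file):
    """Render a Hugo _index.md from a jinja2 template (hypothetical helper)."""
    data = dict()
    data["distribution"] = distribution
    data["now"] = datetime.datetime.utcnow()

    templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
    templateEnv = jinja2.Environment(loader=templateLoader, undefined=jinja2.Undefined)
    template = templateEnv.get_template(template_file)

    with open(dest_file, "w") as write_out:
        write_out.write(template.render(data))

# equivalent calls:
# generate_index(args.results, dest_file, "_index.language.md")
# generate_index(args.results, dest_file, "_index.package.md")
```
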

  def store_json_file(content, dest_file):

      with open(dest_file, "w") as f:

          f.write(json.dumps(content, indent=2))

file modified
+11 -6
@@ -1,13 +1,18 @@ 

- FROM registry.fedoraproject.org/fedora:33

- 

- RUN dnf install -y lbzip2 unzip xz git cpio translate-toolkit dnf-plugins-core python3-pip rsync vim

+ FROM registry.fedoraproject.org/fedora:33 as builder

  

+ RUN dnf install -y lbzip2 unzip xz cpio dnf-plugins-core rsync python3-pip hugo gettext git

  COPY requirements.txt /src/requirements.txt

+ 

  RUN pip install --no-cache -r /src/requirements.txt

  RUN pip install --upgrade https://github.com/WeblateOrg/language-data/archive/master.zip

  RUN pip install charamel

  RUN pip install git+https://github.com/WeblateOrg/translation-finder.git

- 

- VOLUME /src

- VOLUME /srpms

+ RUN mkdir -p /src/results /srpms; chmod g+rwX /srpms; chmod -R 1777 /tmp

  WORKDIR /src

+ 

+ COPY *.py *.sh /src/

+ COPY website /src/website/

+ RUN chmod -R g+rwX /src/website

+ COPY templates /src/templates/

+ VOLUME /src/results

+ ENV VERS f33

@@ -0,0 +1,18 @@ 

+ FROM registry.fedoraproject.org/fedora:34 as builder

+ 

+ RUN dnf install -y lbzip2 unzip xz cpio dnf-plugins-core rsync python3-pip hugo gettext git

+ COPY requirements.txt /src/requirements.txt

+ 

+ RUN pip install --no-cache -r /src/requirements.txt

+ RUN pip install --upgrade https://github.com/WeblateOrg/language-data/archive/master.zip

+ RUN pip install charamel

+ RUN pip install git+https://github.com/WeblateOrg/translation-finder.git

+ RUN mkdir -p /src/results /srpms; chmod g+rwX /srpms; chmod -R 1777 /tmp

+ WORKDIR /src

+ 

+ COPY *.py *.sh /src/

+ COPY website /src/website/

+ RUN chmod -R g+rwX /src/website

+ COPY templates /src/templates/

+ VOLUME /src/results

+ ENV VERS f34

@@ -0,0 +1,5 @@ 

+ ---

+ title: "Languages for {{ distribution }}"

+ date: {{ now }}

+ layout: "list_languages"

+ --- 

\ No newline at end of file

@@ -0,0 +1,4 @@ 

+ ---

+ title: "Packages for {{ distribution }}"

+ date: {{ now }}

+ --- 

\ No newline at end of file

file modified
+5 -1
@@ -1,6 +1,10 @@ 

  ---

- title: "{{ lang_name_en }} ({{ lang_name_local }})"

+ title: "{{ lang_code }}-{{ lang_name_en }} ({{ lang_name_local }})"

  date: {{ now }}

+ code: {{ lang_code }}

+ name_english: {{ lang_name_en }}

+ name_local: {{ lang_name_local }}

+ progress_d: {{ progress_d }}

  ---

  

  Language progress for {{ lang_name_en }} ({{ lang_code }}) in Fedora {{ results }} is:

file modified
+4 -1
@@ -1,5 +1,8 @@ 

  baseURL = "https://jibecfed.fedorapeople.org/partage/fedora-localization-statistics/"

  languageCode = "en-us"

- title = "Temporary demo"

+ title = "Fedora localization statistics"

  theme = "beautifulhugo"

  staticDir = "static"

+ 

+ [markup.goldmark.renderer]

+ unsafe = true 

\ No newline at end of file

@@ -0,0 +1,12 @@ 

+ {{ define "main" }}

+   <div class="container" role="main">

+       <div class="col-lg-8 col-lg-offset-2 col-md-10 col-md-offset-1">

+       {{ .Content }}

+       <ul class="contents">

+           {{ range sort .Pages }}

+               <li><a href="{{ .Permalink }}">{{ .Title }}</a></li>

+           {{ end }}

+           </ul>

+       </div>

+   </div>

+ {{ end }}

@@ -7,11 +7,6 @@ 

              {{.}}

            </div>

          {{ end }}

-         <div class="posts-list">

-           {{ range .Paginator.Pages }}

-             {{ partial "post_preview.html" .}}

-           {{ end }}

-         </div>

          {{ if or (.Paginator.HasPrev) (.Paginator.HasNext) }}

            <ul class="pager main-pager">

              {{ if .Paginator.HasPrev }}

@@ -0,0 +1,81 @@ 

+ {{ define "main" }}

+   <div class="container" role="main">

+       <div class="col-lg-8 col-lg-offset-2 col-md-10 col-md-offset-1">

+       {{ .Content }}

+ 

+           <table class="contents" id="languages">

+               <caption>Click on columns headers to sort values</caption>

+           <tr>

+               <th onclick="sortTable(0)">code</th>

+               <th onclick="sortTable(1)">English name</th>

+               <th onclick="sortTable(2)">Local name</th>

+               <th onclick="sortTable(3)">Progress</th>

+           </tr>

+           {{ range sort .Pages "Title" "asc" }}

+             <tr>

+                 <td><a href="{{ .Permalink }}">{{ .Params.code }}</a></td>

+                 <td>{{ .Params.name_english }}</td>

+                 <td>{{ .Params.name_local }}</td>

+                 <td>{{ .Params.progress_d }}</td>

+             </tr>

+           {{ end }}

+       </table>

+       </div>

+   </div>

+ <script>

+ function sortTable(n) {

+   var table, rows, switching, i, x, y, shouldSwitch, dir, switchcount = 0;

+   table = document.getElementById("languages");

+   switching = true;

+   // Set the sorting direction to ascending:

+   dir = "asc";

+   /* Make a loop that will continue until

+   no switching has been done: */

+   while (switching) {

+     // Start by saying: no switching is done:

+     switching = false;

+     rows = table.rows;

+     /* Loop through all table rows (except the

+     first, which contains table headers): */

+     for (i = 1; i < (rows.length - 1); i++) {

+       // Start by saying there should be no switching:

+       shouldSwitch = false;

+       /* Get the two elements you want to compare,

+       one from current row and one from the next: */

+       x = rows[i].getElementsByTagName("TD")[n];

+       y = rows[i + 1].getElementsByTagName("TD")[n];

+       /* Check if the two rows should switch place,

+       based on the direction, asc or desc: */

+       if (dir == "asc") {

+         if (x.innerHTML.toLowerCase() > y.innerHTML.toLowerCase()) {

+           // If so, mark as a switch and break the loop:

+           shouldSwitch = true;

+           break;

+         }

+       } else if (dir == "desc") {

+         if (x.innerHTML.toLowerCase() < y.innerHTML.toLowerCase()) {

+           // If so, mark as a switch and break the loop:

+           shouldSwitch = true;

+           break;

+         }

+       }

+     }

+     if (shouldSwitch) {

+       /* If a switch has been marked, make the switch

+       and mark that a switch has been done: */

+       rows[i].parentNode.insertBefore(rows[i + 1], rows[i]);

+       switching = true;

+       // Each time a switch is done, increase this count by 1:

+       switchcount ++;

+     } else {

+       /* If no switching has been done AND the direction is "asc",

+       set the direction to "desc" and run the while loop again. */

+       if (switchcount == 0 && dir == "asc") {

+         dir = "desc";

+         switching = true;

+       }

+     }

+   }

+ }

+ </script>

+ {{ end }} 

\ No newline at end of file

@@ -7,28 +7,11 @@ 

              {{.}}

            </div>

          {{ end }}

- 

-         <div class="posts-list">

-           {{ $pag := .Paginate (where site.RegularPages "Type" "in" site.Params.mainSections) }}

-           {{ range $pag.Pages }}

-             {{ partial "post_preview" . }}

+         <ul class="">

+           {{ range .Site.Sections }}

+             <li><a href="{{ .Permalink }}">{{ .Title }}</a></li>

            {{ end }}

-         </div>

- 

-         {{ if or (.Paginator.HasPrev) (.Paginator.HasNext) }}

-           <ul class="pager main-pager">

-             {{ if .Paginator.HasPrev }}

-               <li class="previous">

-                 <a href="{{ .Permalink }}page/{{ .Paginator.Prev.PageNumber }}/">&larr; {{ i18n "newerPosts" }}</a>

-               </li>

-             {{ end }}

-             {{ if .Paginator.HasNext }}

-               <li class="next">

-                 <a href="{{ .Permalink }}page/{{ .Paginator.Next.PageNumber }}/">{{ i18n "olderPosts" }} &rarr;</a>

-               </li>

-             {{ end }}

-           </ul>

-         {{ end }}

+         </ul>

        </div>

      </div>

    </div>

@@ -0,0 +1,13 @@ 

+ <ol  class="nav navbar-nav">

+   {{ template "breadcrumbnav" (dict "p1" . "p2" .) }}

+ </ol>

+ {{ define "breadcrumbnav" }}

+ {{ if .p1.Parent }}

+ {{ template "breadcrumbnav" (dict "p1" .p1.Parent "p2" .p2 )  }}

+ {{ else if not .p1.IsHome }}

+ {{ template "breadcrumbnav" (dict "p1" .p1.Site.Home "p2" .p2 )  }}

+ {{ end }}

+ <li{{ if eq .p1 .p2 }} class="active"{{ end }}>

+   <a href="{{ .p1.Permalink }}">{{ .p1.Title }}</a>

+ </li>

+ {{ end }} 

\ No newline at end of file