| |
@@ -3,6 +3,7 @@
|
| |
|
| |
import argparse
|
| |
import datetime
|
| |
+
|
| |
import jinja2
|
| |
import json
|
| |
import langtable
|
| |
@@ -23,18 +24,24 @@
|
| |
|
| |
parser.add_argument("--refresh", action="store_true", help="Force refresh of files")
|
| |
|
| |
- parser.add_argument("-v",
|
| |
- "--verbose",
|
| |
- default=False,
|
| |
- action="store_true",
|
| |
- dest="verbose",
|
| |
- help="Add verbosity")
|
| |
+ parser.add_argument(
|
| |
+ "-v",
|
| |
+ "--verbose",
|
| |
+ default=False,
|
| |
+ action="store_true",
|
| |
+ dest="verbose",
|
| |
+ help="Add verbosity",
|
| |
+ )
|
| |
args = parser.parse_args()
|
| |
|
| |
loglevel = logging.INFO
|
| |
if args.verbose:
|
| |
- loglevel = logging.DEBUG
|
| |
- logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=loglevel, force=True)
|
| |
+ loglevel = logging.DEBUG
|
| |
+ logging.basicConfig(
|
| |
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
|
| |
+ level=loglevel,
|
| |
+ force=True,
|
| |
+ )
|
| |
log = logging.getLogger("buildWebsite")
|
| |
|
| |
results_folder = "./results/{v}/".format(v=args.results)
|
| |
@@ -92,7 +99,14 @@
|
| |
|
| |
for lang in results.get("stats", []):
|
| |
val = packages_langs_results.get(lang["lang_code"], [])
|
| |
- val.append({"name": name, "progress": lang["progress"], "translated": lang["translated"], "team": lang["team"]})
|
| |
+ val.append(
|
| |
+ {
|
| |
+ "name": name,
|
| |
+ "progress": lang["progress"],
|
| |
+ "translated": lang["translated"],
|
| |
+ "team": lang["team"],
|
| |
+ }
|
| |
+ )
|
| |
packages_langs_results[lang["lang_code"]] = val
|
| |
|
| |
log.info("Prepare json files for languages")
|
| |
@@ -239,7 +253,9 @@
|
| |
|
| |
# sometimes, no file were found, which means no stats can be used
|
| |
if len(tmp_df) == 0:
|
| |
- log.warning(" The template {t} for {f} is empty".format(t=template, f=stats_file))
|
| |
+ log.warning(
|
| |
+ " The template {t} for {f} is empty".format(t=template, f=stats_file)
|
| |
+ )
|
| |
continue
|
| |
|
| |
tmp_df["totalsourcewords"] = (
|
| |
@@ -260,6 +276,9 @@
|
| |
stats_df = dfs[0]
|
| |
|
| |
stats_df_w_lang = pd.merge(stats_df, log_files, how="inner", on="filename")
|
| |
+ stats_df_w_lang["filename"] = stats_df_w_lang["filename"].apply(
|
| |
+ lambda s: s[len(os.path.commonprefix(stats_df_w_lang["filename"].tolist())) :]
|
| |
+ )
|
| |
stats_df_no_lang = pd.merge(stats_df, log_files, how="outer", indicator=True).loc[
|
| |
lambda x: x["_merge"] == "left_only"
|
| |
]
|
| |
@@ -274,8 +293,17 @@
|
| |
.reset_index()
|
| |
.droplevel(1, axis=1)
|
| |
)
|
| |
- temp_teams = stats_df_w_lang.groupby("lang_code")["team"].apply(lambda x: ",".join(x))
|
| |
- temp = pd.merge(temp_translated, temp_teams, how="inner", on="lang_code").to_dict(orient="records")
|
| |
+
|
| |
+ temp_teams = stats_df_w_lang.groupby("lang_code")["team"].apply(
|
| |
+ lambda x: ",".join(x)
|
| |
+ )
|
| |
+ temp_files = stats_df_w_lang.groupby("lang_code")["filename"].apply(
|
| |
+ lambda x: ",".join(x)
|
| |
+ )
|
| |
+ temp_bis = pd.merge(temp_teams, temp_files, how="inner", on="lang_code")
|
| |
+ temp = pd.merge(temp_translated, temp_bis, how="inner", on="lang_code").to_dict(
|
| |
+ orient="records"
|
| |
+ )
|
| |
|
| |
for line in temp:
|
| |
line["progress"] = 0
|
| |
@@ -290,7 +318,9 @@
|
| |
line["progress"] = 0
|
| |
continue
|
| |
try:
|
| |
- line["progress"] = round((int(line["translatedsourcewords"]) / total_source_words) * 100)
|
| |
+ line["progress"] = round(
|
| |
+ (int(line["translatedsourcewords"]) / total_source_words) * 100
|
| |
+ )
|
| |
except OverflowError:
|
| |
log.info(
|
| |
" File {f} has Translated={t} and Source={tot}".format(
|
| |
@@ -300,6 +330,8 @@
|
| |
)
|
| |
)
|
| |
|
| |
+ line["filename"] = line["filename"].split(",")
|
| |
+
|
| |
results["stats"] = list()
|
| |
for line in sorted(temp, key=lambda k: k["progress"], reverse=True):
|
| |
del line["translatedsourcewords"]
|
| |
When viewing the list of languages, we also want to see the list of files, so that debugging can be done faster