@@ -1,218 +1,199 @@
#!/usr/bin/env python3
# coding=utf-8
from __future__ import print_function

import argparse
import json
import os
import io
import functools
from datetime import datetime
import sys
import fontTables
import json
import logging
import os
import re
import subprocess
import sys
from datetime import datetime
from itertools import chain
from pathlib import Path
from typing import Dict, List, TextIO, Tuple, Union

HERE = os.path.dirname(__file__)
from bdflib import reader as bdfreader
from bdflib.model import Font, Glyph

try:
    to_unicode = unicode
except NameError:
    to_unicode = str
import font_tables

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

HERE = Path(__file__).resolve().parent


@functools.lru_cache(maxsize=None)
def cjkFont():
    from bdflib import reader as bdfreader

def cjk_font() -> Font:
    with open(os.path.join(HERE, "wqy-bitmapsong/wenquanyi_9pt.bdf"), "rb") as f:
        return bdfreader.read_bdf(f)


def log(message):
    print(message, file=sys.stdout)


# Loading a single JSON file
def loadJson(fileName, skipFirstLine):
    with io.open(fileName, mode="r", encoding="utf-8") as f:
        if skipFirstLine:
def load_json(filename: str, skip_first_line: bool) -> dict:
    with open(filename) as f:
        if skip_first_line:
            f.readline()

        obj = json.loads(f.read())

    return obj
        return json.loads(f.read())


def readTranslation(jsonDir, langCode):
    fileName = "translation_{}.json".format(langCode)
def read_translation(json_root: Union[str, Path], lang_code: str) -> dict:
    filename = f"translation_{lang_code}.json"

    fileWithPath = os.path.join(jsonDir, fileName)
    file_with_path = os.path.join(json_root, filename)

    try:
        lang = loadJson(fileWithPath, False)
        lang = load_json(file_with_path, skip_first_line=False)
    except json.decoder.JSONDecodeError as e:
        log("Failed to decode " + fileName)
        log(str(e))
        logging.error(f"Failed to decode {filename}")
        logging.exception(str(e))
        sys.exit(2)

    # Extract lang code from file name
    langCode = fileName[12:-5].upper()
    # ...and the one specified in the JSON file...
    try:
        langCodeFromJson = lang["languageCode"]
    except KeyError:
        langCodeFromJson = "(missing)"

    # ...cause they should be the same!
    if langCode != langCodeFromJson:
        raise ValueError(
            "Invalid languageCode " + langCodeFromJson + " in file " + fileName
        )
    validate_langcode_matches_content(filename, lang)

    return lang


def writeStart(f):
    f.write(
        to_unicode(
            """// WARNING: THIS FILE WAS AUTO GENERATED BY make_translation.py. PLEASE DO NOT EDIT.
def validate_langcode_matches_content(filename: str, content: dict) -> None:
    # Extract lang code from file name
    lang_code = filename[12:-5].upper()
    # ...and the one specified in the JSON file...
    try:
        lang_code_from_json = content["languageCode"]
    except KeyError:
        lang_code_from_json = "(missing)"

#include "Translation.h"
"""
        )
    )
    # ...cause they should be the same!
    if lang_code != lang_code_from_json:
        raise ValueError(f"Invalid languageCode {lang_code_from_json} in file {filename}")
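
# Worked example, for illustration only (assumes the "translation_XX.json" naming
# used above):
#     "translation_PL.json"[12:-5]     -> "PL"
#     "translation_zh_CN.json"[12:-5]  -> "zh_CN"   (upper-cased to "ZH_CN")
# i.e. lang_code is exactly the part between "translation_" and ".json".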


def escapeC(s):
    return s.replace('"', '\\"')
def write_start(f: TextIO):
    f.write("// WARNING: THIS FILE WAS AUTO GENERATED BY make_translation.py. PLEASE DO NOT EDIT.\n")
    f.write("\n")
    f.write('#include "Translation.h"\n')


def getConstants():
def get_constants() -> List[str]:
    # Extra constants that are used in the firmware that are shared across all languages
    consants = []
    consants.append(("SymbolPlus", "+"))
    consants.append(("SymbolMinus", "-"))
    consants.append(("SymbolSpace", " "))
    consants.append(("SymbolDot", "."))
    consants.append(("SymbolDegC", "C"))
    consants.append(("SymbolDegF", "F"))
    consants.append(("SymbolMinutes", "M"))
    consants.append(("SymbolSeconds", "S"))
    consants.append(("SymbolWatts", "W"))
    consants.append(("SymbolVolts", "V"))
    consants.append(("SymbolDC", "DC"))
    consants.append(("SymbolCellCount", "S"))
    consants.append(("SymbolVersionNumber", buildVersion))
    return consants
    return [
        ("SymbolPlus", "+"),
        ("SymbolMinus", "-"),
        ("SymbolSpace", " "),
        ("SymbolDot", "."),
        ("SymbolDegC", "C"),
        ("SymbolDegF", "F"),
        ("SymbolMinutes", "M"),
        ("SymbolSeconds", "S"),
        ("SymbolWatts", "W"),
        ("SymbolVolts", "V"),
        ("SymbolDC", "DC"),
        ("SymbolCellCount", "S"),
        ("SymbolVersionNumber", buildVersion)
    ]


def getDebugMenu():
    constants = []
    constants.append(datetime.today().strftime("%d-%m-%y"))
    constants.append("HW G ") # High Water marker for GUI task
    constants.append("HW M ") # High Water marker for MOV task
    constants.append("HW P ") # High Water marker for PID task
    constants.append("Time ") # Uptime (aka timestamp)
    constants.append("Move ") # Time of last significant movement
    constants.append("RTip ") # Tip reading in uV
    constants.append("CTip ") # Tip temp in C
    constants.append("CHan ") # Handle temp in C
    constants.append("Vin ") # Input voltage
    constants.append("PCB ") # PCB Version AKA IMU version
    constants.append("PWR ") # Power Negotiation State
    constants.append("Max ") # Max deg C limit

    return constants
def get_debug_menu() -> List[str]:
    return [
        datetime.today().strftime("%d-%m-%y"),
        "HW G ",
        "HW M ",
        "HW P ",
        "Time ",
        "Move ",
        "RTip ",
        "CTip ",
        "CHan ",
        "Vin ",
        "PCB ",
        "PWR ",
        "Max "
    ]


def getLetterCounts(defs, lang):
    textList = []
def get_letter_counts(defs: dict, lang: dict) -> List[str]:
    text_list = []
    # iterate over all strings
    obj = lang["menuOptions"]
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        textList.append(obj[eid]["desc"])
        text_list.append(obj[eid]["desc"])

    obj = lang["messages"]
    for mod in defs["messages"]:
        eid = mod["id"]
        if eid not in obj:
            textList.append(mod["default"])
            text_list.append(mod["default"])
        else:
            textList.append(obj[eid])
            text_list.append(obj[eid])

    obj = lang["characters"]

    for mod in defs["characters"]:
        eid = mod["id"]
        textList.append(obj[eid])
        text_list.append(obj[eid])

    obj = lang["menuOptions"]
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        textList.append(obj[eid]["text2"][0])
        textList.append(obj[eid]["text2"][1])
        text_list.append(obj[eid]["text2"][0])
        text_list.append(obj[eid]["text2"][1])

    obj = lang["menuGroups"]
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        textList.append(obj[eid]["text2"][0])
        textList.append(obj[eid]["text2"][1])
        text_list.append(obj[eid]["text2"][0])
        text_list.append(obj[eid]["text2"][1])

    obj = lang["menuGroups"]
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        textList.append(obj[eid]["desc"])
    constants = getConstants()
        text_list.append(obj[eid]["desc"])
    constants = get_constants()
    for x in constants:
        textList.append(x[1])
    textList.extend(getDebugMenu())
        text_list.append(x[1])
    text_list.extend(get_debug_menu())

    # collapse all strings down into the composite letters and store totals for these

    symbolCounts = {}
    for line in textList:
    symbol_counts: dict[str, int] = {}
    for line in text_list:
        line = line.replace("\n", "").replace("\r", "")
        line = line.replace("\\n", "").replace("\\r", "")
        if len(line):
            # print(line)
        if line:
            for letter in line:
                symbolCounts[letter] = symbolCounts.get(letter, 0) + 1
    symbolCounts = sorted(
        symbolCounts.items(), key=lambda kv: (kv[1], kv[0])
    ) # swap to Big -> little sort order
    symbolCounts = list(map(lambda x: x[0], symbolCounts))
    symbolCounts.reverse()
    return symbolCounts
                symbol_counts[letter] = symbol_counts.get(letter, 0) + 1
    symbols_by_occurrence = sorted(symbol_counts.items(), key=lambda kv: (kv[1], kv[0]))
    # swap to Big -> little sort order
    symbols_by_occurrence = [x[0] for x in symbols_by_occurrence]
    symbols_by_occurrence.reverse()
    return symbols_by_occurrence
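
# Worked example of the ordering above, for illustration only, with made-up counts:
#     symbol_counts = {"a": 3, "b": 1, "c": 3}
#     sorted(symbol_counts.items(), key=lambda kv: (kv[1], kv[0]))
#         -> [("b", 1), ("a", 3), ("c", 3)]
#     keys only, then reversed -> ["c", "a", "b"]
# i.e. the most frequently used symbols come first, ties broken by the character value.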


def getCJKGlyph(sym):
    from bdflib.model import Glyph
def get_cjk_glyph(sym: str) -> str:
    glyph: Glyph = cjk_font()[ord(sym)]

    try:
        glyph: Glyph = cjkFont()[ord(sym)]
    except:
        return None
    data = glyph.data
    (srcLeft, srcBottom, srcW, srcH) = glyph.get_bounding_box()
    dstW = 12
    dstH = 16
    src_left, src_bottom, src_w, src_h = glyph.get_bounding_box()
    dst_w = 12
    dst_h = 16

    # The source data is a per-row list of ints. The first item is the bottom-
    # most row. For each row, the LSB is the right-most pixel.
    # Here, (x, y) is the coordinates with origin at the top-left.
    def getCell(x, y):
    def get_cell(x: int, y: int) -> bool:
        # Adjust x coordinates by actual bounding box.
        adjX = x - srcLeft
        if adjX < 0 or adjX >= srcW:
        adj_x = x - src_left
        if adj_x < 0 or adj_x >= src_w:
            return False
        # Adjust y coordinates by actual bounding box, then place the glyph
        # baseline 3px above the bottom edge to make it centre-ish.
        # This metric is optimized for WenQuanYi Bitmap Song 9pt and assumes
        # each glyph is to be placed in a 12x12px box.
        adjY = y - (dstH - srcH - srcBottom - 3)
        if adjY < 0 or adjY >= srcH:
        adj_y = y - (dst_h - src_h - src_bottom - 3)
        if adj_y < 0 or adj_y >= src_h:
            return False
        if data[srcH - adjY - 1] & (1 << (srcW - adjX - 1)):
        if data[src_h - adj_y - 1] & (1 << (src_w - adj_x - 1)):
            return True
        else:
            return False
@@ -224,20 +205,20 @@ def getCJKGlyph(sym):
    # bottom half.
    s = ""
    for block in range(2):
        for c in range(dstW):
        for c in range(dst_w):
            b = 0
            for r in range(8):
                if getCell(c, r + 8 * block):
                if get_cell(c, r + 8 * block):
                    b |= 0x01 << r
            s += f"0x{b:02X},"
    return s
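
# For illustration only: with dst_w = 12 and dst_h = 16 the loop above emits
# 2 blocks * 12 columns = 24 bytes per glyph. Bit r of each byte is the pixel at
# row (8 * block + r) of that column, so the least-significant bit is the top row
# of its half of the glyph.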


def getCharsFromFontIndex(index: int) -> str:
    '''
def get_chars_from_font_index(index: int) -> str:
    """
    Converts the font table index into its corresponding string escape
    sequence(s).
    '''
    """

    # We want to be able to use more than 254 symbols (excluding \x00 null
    # terminator and \x01 new-line) in the font table but without making all
@@ -266,159 +247,144 @@ def getCharsFromFontIndex(index: int) -> str:
    # ...
    # 0xFF 0xFF => 15 * 0xFF - 15 + 255 = 4065

    assert index >= 0
    page = int((index + 14) / 0xFF)
    assert page <= 0x0F
    if index < 0:
        raise ValueError("index must be positive")
    page = (index + 0x0E) // 0xFF
    if page > 0x0F:
        raise ValueError("page value out of range")
    if page == 0:
        return "\\x%0.2X" % index
        return f"\\x{index:02X}"
    else:
        # Into extended range
        # Leader is 0xFz where z is the page number
        # Following char is the remainder
        leader = page + 0xF0
        value = ((index + 14) % 0xFF) + 1
        assert leader <= 0xFF
        assert value <= 0xFF
        return "\\x%0.2X\\x%0.2X" % (leader, value)
        value = ((index + 0x0E) % 0xFF) + 0x01

        if leader > 0xFF or value > 0xFF:
            raise ValueError("value is out of range")
        return f"\\x{leader:02X}\\x{value:02X}"
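
# Worked examples of the encoding above, for illustration only (escape text as it is
# emitted into the generated C source):
#     index 2    -> \x02          (single byte, page 0)
#     index 0xF0 -> \xF0          (last single-byte index)
#     index 0xF1 -> \xF1\x01      (first two-byte index, page 1)
#     index 4065 -> \xFF\xFF      (largest encodable index)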


def getFontMapAndTable(textList):
def get_font_map_and_table(text_list: List[str]) -> Tuple[str, Dict[str, str]]:
    # the text list is sorted
    # allocate out these in their order as number codes
    symbolMap = {}
    symbolMap["\n"] = "\\x01" # Force insert the newline char
    symbol_map = {"\n": "\\x01"}
    index = 2 # start at 2, as 0 = null terminator, 1 = new line
    forcedFirstSymbols = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    forced_first_symbols = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]

    # Get the font table, which does not include CJK chars
    fontTable = fontTables.getFontMap()
    fontSmallTable = fontTables.getSmallFontMap()
    font_table = font_tables.get_font_map()
    font_small_table = font_tables.get_small_font_map()

    # We want to put all CJK chars after non-CJK ones so that the CJK chars
    # do not need to be in the small font table to save space.
    # We assume all symbols not in the font table to be a CJK char.
    # We also enforce that numbers are first.
    orderedNormalSymList = forcedFirstSymbols + [x for x in textList if x not in forcedFirstSymbols and x in fontTable]
    orderedCJKSymList = [x for x in textList if x not in forcedFirstSymbols and x not in fontTable]
    ordered_normal_sym_list: List[str] = forced_first_symbols + [x for x in text_list if x not in forced_first_symbols and x in font_table]
    ordered_cjk_sym_list: List[str] = [x for x in text_list if x not in forced_first_symbols and x not in font_table]

    totalSymbolCount = len(orderedNormalSymList) + len(orderedCJKSymList)
    total_symbol_count = len(ordered_normal_sym_list) + len(ordered_cjk_sym_list)
    # \x00 is for NULL termination and \x01 is for newline, so the maximum
    # number of symbols allowed is as follows (see also the comments in
    # `getCharsFromFontIndex`):
    if totalSymbolCount > (0x10 * 0xFF - 15) - 2:
        log(f"Error, too many used symbols for this version (total {totalSymbolCount})")
    # `get_chars_from_font_index`):
    if total_symbol_count > (0x10 * 0xFF - 15) - 2: # 4063
        logging.error(f"Error, too many used symbols for this version (total {total_symbol_count})")
        sys.exit(1)

    log("Generating fonts for {} symbols".format(totalSymbolCount))
    logging.info(f"Generating fonts for {total_symbol_count} symbols")

    for l in (orderedNormalSymList, orderedCJKSymList):
        for sym in l:
            assert(sym not in symbolMap)
            symbolMap[sym] = getCharsFromFontIndex(index)
            index = index + 1
    for sym in chain(ordered_normal_sym_list, ordered_cjk_sym_list):
        if sym in symbol_map:
            raise ValueError("Symbol already exists in symbol map")
        symbol_map[sym] = get_chars_from_font_index(index)
        index += 1

    fontTableStrings = []
    fontSmallTableStrings = []
    for sym in orderedNormalSymList:
        if sym not in fontTable:
            log("Missing Large font element for {}".format(sym))
    font_table_strings = []
    font_small_table_strings = []
    for sym in ordered_normal_sym_list:
        if sym not in font_table:
            logging.error(f"Missing Large font element for {sym}")
            sys.exit(1)
        fontLine = fontTable[sym]
        fontTableStrings.append(fontLine + "//{} -> {}".format(symbolMap[sym], sym))
        if sym not in fontSmallTable:
            log("Missing Small font element for {}".format(sym))
        font_line: str = font_table[sym]
        font_table_strings.append(f"{font_line}//{symbol_map[sym]} -> {sym}")
        if sym not in font_small_table:
            logging.error(f"Missing Small font element for {sym}")
            sys.exit(1)
        fontLine = fontSmallTable[sym]
        fontSmallTableStrings.append(
            fontLine + "//{} -> {}".format(symbolMap[sym], sym)
        )
        font_line: str = font_small_table[sym]
        font_small_table_strings.append(f"{font_line}//{symbol_map[sym]} -> {sym}")

    for sym in orderedCJKSymList:
        assert(sym not in fontTable)
        fontLine = getCJKGlyph(sym)
        if fontLine is None:
            log("Missing Large font element for {}".format(sym))
    for sym in ordered_cjk_sym_list:
        if sym in font_table:
            raise ValueError("Symbol already exists in font_table")
        font_line = get_cjk_glyph(sym)
        if font_line is None:
            logging.error(f"Missing Large font element for {sym}")
            sys.exit(1)
        fontTableStrings.append(fontLine + "//{} -> {}".format(symbolMap[sym], sym))
        font_table_strings.append(f"{font_line}//{symbol_map[sym]} -> {sym}")
        # No data to add to the small font table
        fontSmallTableStrings.append(
            "// {} -> {}".format(symbolMap[sym], sym)
        )
        font_small_table_strings.append(f"// {symbol_map[sym]} -> {sym}")

    outputTable = "const uint8_t USER_FONT_12[] = {" + to_unicode("\n")
    for line in fontTableStrings:
    output_table = "const uint8_t USER_FONT_12[] = {\n"
    for line in font_table_strings:
        # join the font table into one large string
        outputTable = outputTable + line + to_unicode("\n")
    outputTable = outputTable + "};" + to_unicode("\n")
    outputTable = outputTable + "const uint8_t USER_FONT_6x8[] = {" + to_unicode("\n")
    for line in fontSmallTableStrings:
        output_table += line + "\n"
    output_table += "};\n"
    output_table += "const uint8_t USER_FONT_6x8[] = {\n"
    for line in font_small_table_strings:
        # join the font table into one large string
        outputTable = outputTable + line + to_unicode("\n")
    outputTable = outputTable + "};" + to_unicode("\n")
    return (outputTable, symbolMap)
        output_table += line + "\n"
    output_table += "};\n"
    return output_table, symbol_map


def convStr(symbolConversionTable, text):
def convert_string(symbol_conversion_table: Dict[str, str], text: str) -> str:
    # convert all of the symbols from the string into escapes for their content
    outputString = ""
    output_string = ""
    for c in text.replace("\\r", "").replace("\\n", "\n"):
        if c not in symbolConversionTable:
            log("Missing font definition for {}".format(c))
        if c not in symbol_conversion_table:
            logging.error(f"Missing font definition for {c}")
            sys.exit(1)
        else:
            outputString = outputString + symbolConversionTable[c]
    return outputString
            output_string += symbol_conversion_table[c]
    return output_string
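
# For illustration only, with a hypothetical mapping (real values come from the font
# tables generated above):
#     symbol_conversion_table = {"T": "\\x10", "i": "\\x2B", "p": "\\x2C"}
#     convert_string(symbol_conversion_table, "Tip") -> "\\x10\\x2B\\x2C"
# i.e. every character of the message is replaced by its font-table escape sequence.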


def writeLanguage(lang, defs, f):
    languageCode = lang["languageCode"]
    log("Generating block for " + languageCode)
def write_language(lang: dict, defs: dict, f: TextIO) -> None:
    language_code: str = lang["languageCode"]
    logging.info(f"Generating block for {language_code}")
    # Iterate over all of the text to build up the symbols & counts
    textList = getLetterCounts(defs, lang)
    text_list = get_letter_counts(defs, lang)
    # From the letter counts, need to make a symbol translator & write out the font
    (fontTableText, symbolConversionTable) = getFontMapAndTable(textList)
    font_table_text, symbol_conversion_table = get_font_map_and_table(text_list)

    try:
        langName = lang["languageLocalName"]
        lang_name = lang["languageLocalName"]
    except KeyError:
        langName = languageCode
        lang_name = language_code

    f.write(to_unicode("\n// ---- " + langName + " ----\n\n"))
    f.write(fontTableText)
    f.write(to_unicode("\n// ---- " + langName + " ----\n\n"))
    f.write(f"\n// ---- {lang_name} ----\n\n")
    f.write(font_table_text)
    f.write(f"\n// ---- {lang_name} ----\n\n")

    # ----- Writing SettingsDescriptions
    obj = lang["menuOptions"]
    f.write(to_unicode("const char* SettingsDescriptions[] = {\n"))
    f.write("const char* SettingsDescriptions[] = {\n")

    maxLen = 25
    max_len = 25
    index = 0
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        if "feature" in mod:
            f.write(to_unicode("#ifdef " + mod["feature"] + "\n"))
        f.write(
            to_unicode(
                " /* ["
                + "{:02d}".format(index)
                + "] "
                + eid.ljust(maxLen)[:maxLen]
                + " */ "
            )
        )
        f.write(
            to_unicode(
                '"'
                + convStr(symbolConversionTable, (obj[eid]["desc"]))
                + '",'
                + "//{} \n".format(obj[eid]["desc"])
            )
        )
        if "feature" in mod:
            f.write(to_unicode("#endif\n"))
        index = index + 1
            f.write(f"#ifdef {mod['feature']}\n")
        f.write(f" /* [{index:02d}] {eid.ljust(max_len)[:max_len]} */ ")
        f.write(f"\"{convert_string(symbol_conversion_table, obj[eid]['desc'])}\",//{obj[eid]['desc']} \n")

    f.write(to_unicode("};\n\n"))
        if "feature" in mod:
            f.write("#endif\n")
        index += 1

    f.write("};\n\n")

    # ----- Writing Message strings

@@ -426,233 +392,134 @@ def writeLanguage(lang, defs, f):

    for mod in defs["messages"]:
        eid = mod["id"]
        sourceText = ""
        source_text = ""
        if "default" in mod:
            sourceText = mod["default"]
            source_text = mod["default"]
        if eid in obj:
            sourceText = obj[eid]
        translatedText = convStr(symbolConversionTable, sourceText)
        f.write(
            to_unicode(
                "const char* "
                + eid
                + ' = "'
                + translatedText
                + '";'
                + "//{} \n".format(sourceText.replace("\n", "_"))
            )
        )
            source_text = obj[eid]
        translated_text = convert_string(symbol_conversion_table, source_text)
        source_text = source_text.replace("\n", "_")
        f.write(f'const char* {eid} = "{translated_text}";//{source_text} \n')

    f.write(to_unicode("\n"))
    f.write("\n")

    # ----- Writing Characters

    obj = lang["characters"]

    for mod in defs["characters"]:
        eid = mod["id"]
        f.write(
            to_unicode(
                "const char* "
                + eid
                + ' = "'
                + convStr(symbolConversionTable, obj[eid])
                + '";'
                + "//{} \n".format(obj[eid])
            )
        )

    f.write(to_unicode("\n"))
        eid: str = mod["id"]
        f.write(f'const char* {eid} = "{convert_string(symbol_conversion_table, obj[eid])}";//{obj[eid]} \n')
    f.write("\n")

    # Write out firmware constant options
    constants = getConstants()
    constants = get_constants()
    for x in constants:
        f.write(
            to_unicode(
                "const char* "
                + x[0]
                + ' = "'
                + convStr(symbolConversionTable, x[1])
                + '";'
                + "//{} \n".format(x[1])
            )
        )

    f.write(to_unicode("\n"))
        f.write(f'const char* {x[0]} = "{convert_string(symbol_conversion_table, x[1])}";//{x[1]} \n')
    f.write("\n")

    # Debug Menu
    f.write(to_unicode("const char* DebugMenu[] = {\n"))
    f.write("const char* DebugMenu[] = {\n")

    for c in getDebugMenu():
        f.write(
            to_unicode(
                '\t "' + convStr(symbolConversionTable, c) + '",' + "//{} \n".format(c)
            )
        )
    f.write(to_unicode("};\n\n"))
    for c in get_debug_menu():
        f.write(f'\t "{convert_string(symbol_conversion_table, c)}",//{c} \n')
    f.write("};\n\n")

    # ----- Writing SettingsDescriptions
    obj = lang["menuOptions"]
    f.write(to_unicode("const char* SettingsShortNames[][2] = {\n"))
    f.write("const char* SettingsShortNames[][2] = {\n")

    maxLen = 25
    max_len = 25
    index = 0
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        if "feature" in mod:
            f.write(to_unicode("#ifdef " + mod["feature"] + "\n"))
        f.write(
            to_unicode(
                " /* ["
                + "{:02d}".format(index)
                + "] "
                + eid.ljust(maxLen)[:maxLen]
                + " */ "
            )
        )
        f.write(
            to_unicode(
                '{ "'
                + convStr(symbolConversionTable, (obj[eid]["text2"][0]))
                + '", "'
                + convStr(symbolConversionTable, (obj[eid]["text2"][1]))
                + '" },'
                + "//{} \n".format(obj[eid]["text2"])
            )
        )
            f.write(f"#ifdef {mod['feature']}\n")
        f.write(f" /* [{index:02d}] {eid.ljust(max_len)[:max_len]} */ ")
        f.write(f'{{ "{convert_string(symbol_conversion_table, (obj[eid]["text2"][0]))}", "{convert_string(symbol_conversion_table, (obj[eid]["text2"][1]))}" }},//{obj[eid]["text2"]} \n')

        if "feature" in mod:
            f.write(to_unicode("#endif\n"))
        index = index + 1
            f.write("#endif\n")
        index += 1

    f.write(to_unicode("};\n\n"))
    f.write("};\n\n")

    # ----- Writing Menu Groups
    obj = lang["menuGroups"]
    f.write(to_unicode("const char* SettingsMenuEntries[" + str(len(obj)) + "] = {\n"))
    f.write(f"const char* SettingsMenuEntries[{len(obj)}] = {{\n")

    maxLen = 25
    max_len = 25
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        f.write(to_unicode(" /* " + eid.ljust(maxLen)[:maxLen] + " */ "))
        f.write(
            to_unicode(
                '"'
                + convStr(
                    symbolConversionTable,
                    (obj[eid]["text2"][0]) + "\\n" + obj[eid]["text2"][1],
                )
                + '",'
                + "//{} \n".format(obj[eid]["text2"])
            )
        )
        f.write(f" /* {eid.ljust(max_len)[:max_len]} */ ")
        txt = f'{obj[eid]["text2"][0]}\\n{obj[eid]["text2"][1]}'
        f.write(f'"{convert_string(symbol_conversion_table, txt)}",//{obj[eid]["text2"]} \n')

    f.write(to_unicode("};\n\n"))
    f.write("};\n\n")

    # ----- Writing Menu Groups Descriptions
    obj = lang["menuGroups"]
    f.write(
        to_unicode(
            "const char* SettingsMenuEntriesDescriptions[" + str(len(obj)) + "] = {\n"
        )
    )
    f.write(f"const char* SettingsMenuEntriesDescriptions[{(len(obj))}] = {{\n")

    maxLen = 25
    max_len = 25
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        f.write(to_unicode(" /* " + eid.ljust(maxLen)[:maxLen] + " */ "))
        f.write(
            to_unicode(
                '"'
                + convStr(symbolConversionTable, (obj[eid]["desc"]))
                + '",'
                + "//{} \n".format(obj[eid]["desc"])
            )
        )
        f.write(f" /* {eid.ljust(max_len)[:max_len]} */ ")
        f.write(f"\"{convert_string(symbol_conversion_table, (obj[eid]['desc']))}\",//{obj[eid]['desc']} \n")

    f.write(to_unicode("};\n\n"))
    f.write(
        "const bool HasFahrenheit = "
        + ("true" if lang.get("tempUnitFahrenheit", True) else "false")
        + ";\n"
    )
    f.write("};\n\n")
    f.write(f"const bool HasFahrenheit = {('true' if lang.get('tempUnitFahrenheit', True) else 'false')};\n")

    f.write(to_unicode("\n// Verify SettingsItemIndex values:\n"))
    f.write("\n// Verify SettingsItemIndex values:\n")
    for i, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        f.write(to_unicode(
            f"static_assert(static_cast<uint8_t>(SettingsItemIndex::{eid}) == {i});\n"))
        f.write(f"static_assert(static_cast<uint8_t>(SettingsItemIndex::{eid}) == {i});\n")


def readVersion(jsonDir):
    with open(os.path.relpath(jsonDir + "/../source/version.h"), "r") as version_file:
        try:
            for line in version_file:
                if re.findall(r"^.*(?<=(#define)).*(?<=(BUILD_VERSION))", line):
                    line = re.findall(r"\"(.+?)\"", line)
                    if line:
                        version = line[0]
                        try:
                            version += (
                                "."
                                + subprocess.check_output(
                                    ["git", "rev-parse", "--short=7", "HEAD"]
                                )
                                .strip()
                                .decode("ascii")
                                .upper()
                            )
                        # --short=7: the shortened hash with 7 digits. Increase/decrease if needed!
                        except OSError:
                            version += " git"
        finally:
            if version_file:
                version_file.close()
    return version
def read_version() -> str:
    with open(HERE.parent / 'source' / 'version.h') as version_file:
        for line in version_file:
            if re.findall(r"^.*(?<=(#define)).*(?<=(BUILD_VERSION))", line):
                line = re.findall(r"\"(.+?)\"", line)
                if line:
                    version = line[0]
                    try:
                        version += f".{subprocess.check_output(['git', 'rev-parse', '--short=7', 'HEAD']).strip().decode('ascii').upper()}"
                        # --short=7: the shortened hash with 7 digits. Increase/decrease if needed!
                    except OSError:
                        version += " git"
    return version
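
# For illustration only: both versions above expect version.h to contain a line of the
# form (version number made up)
#     #define BUILD_VERSION "2.15"
# re.findall(r"\"(.+?)\"", line) then extracts ["2.15"], and when git is available the
# short commit hash is appended, e.g. "2.15.1A2B3C4".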


def orderOutput(langDict):
    # These languages go first
    mandatoryOrder = ["EN"]

    # Then add all others in alphabetical order
    sortedKeys = sorted(langDict.keys())

    # Add the rest as they come
    for key in sortedKeys:
        if key not in mandatoryOrder:
            mandatoryOrder.append(key)

    return mandatoryOrder


def parseArgs():
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output", "-o", help="Target file", type=argparse.FileType("w"), required=True
    )
    parser.add_argument("languageCode", help="Language to generate")
    parser.add_argument("--output", "-o",
                        help="Target file",
                        type=argparse.FileType("w"),
                        required=True
                        )
    parser.add_argument("languageCode",
                        help="Language to generate")
    return parser.parse_args()


if __name__ == "__main__":
    jsonDir = HERE

    args = parseArgs()
    json_dir = HERE

    args = parse_args()
    try:
        buildVersion = readVersion(jsonDir)
    except:
        log("error: could not get/extract build version")
        buildVersion = read_version()
    except FileNotFoundError:
        logging.error("error: Could not find version info ")
        sys.exit(1)

    log("Build version: " + buildVersion)
    log("Making " + args.languageCode + " from " + jsonDir)
    logging.info(f"Build version: {buildVersion}")
    logging.info(f"Making {args.languageCode} from {json_dir}")

    lang = readTranslation(jsonDir, args.languageCode)
    defs = loadJson(os.path.join(jsonDir, "translations_def.js"), True)
    out = args.output
    writeStart(out)
    writeLanguage(lang, defs, out)
    lang_ = read_translation(json_dir, args.languageCode)
    defs_ = load_json(os.path.join(json_dir, "translations_def.js"), True)
    out_ = args.output
    write_start(out_)
    write_language(lang_, defs_, out_)

    log("Done")
    logging.info("Done")
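
# Example invocation, for illustration only (output file name is arbitrary):
#     python3 make_translation.py -o Translation_EN.cpp EN
# This reads translation_EN.json and translations_def.js from the script's directory and
# writes the generated font tables and translated string tables to Translation_EN.cpp.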