#!/usr/bin/env python3

import argparse
import functools
import json
import hashlib
import logging
import os
import pickle
import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, TextIO, Tuple, Union
from dataclasses import dataclass

from bdflib import reader as bdfreader
from bdflib.model import Font, Glyph

import font_tables
import brieflz
import objcopy

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

HERE = Path(__file__).resolve().parent


@functools.lru_cache(maxsize=None)
def cjk_font() -> Font:
    with open(os.path.join(HERE, "wqy-bitmapsong/wenquanyi_9pt.bdf"), "rb") as f:
        return bdfreader.read_bdf(f)


# Loading a single JSON file
def load_json(filename: str) -> dict:
    with open(filename) as f:
        return json.loads(f.read())


def get_language_unique_id(language_ascii_name: str) -> int:
    """
    Given a language code, return a unique (enough) uint16_t id code.
    If we ever hit a collision here we can tweak this, but the language list
    should be fairly stable from now on.
    """
    return (
        int(hashlib.sha1(language_ascii_name.encode("utf-8")).hexdigest(), 16) % 0xFFFF
    )


def read_translation(json_root: Union[str, Path], lang_code: str) -> dict:
    filename = f"translation_{lang_code}.json"

    file_with_path = os.path.join(json_root, filename)

    try:
        lang = load_json(file_with_path)
    except json.decoder.JSONDecodeError as e:
        logging.error(f"Failed to decode {filename}")
        logging.exception(str(e))
        sys.exit(2)

    validate_langcode_matches_content(filename, lang)

    return lang


def validate_langcode_matches_content(filename: str, content: dict) -> None:
    # Extract lang code from file name
    lang_code = filename[12:-5].upper()
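    # e.g. "translation_EN.json" -> "EN"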
    # ...and the one specified in the JSON file...
    try:
        lang_code_from_json = content["languageCode"]
    except KeyError:
        lang_code_from_json = "(missing)"

    # ...because they should be the same!
    if lang_code != lang_code_from_json:
        raise ValueError(
            f"Invalid languageCode {lang_code_from_json} in file {filename}"
        )


def write_start(f: TextIO):
    f.write(
        "// WARNING: THIS FILE WAS AUTO GENERATED BY make_translation.py. PLEASE DO NOT EDIT.\n"
    )
    f.write("\n")
    f.write('#include "Translation.h"\n')


def get_constants(build_version: str) -> List[Tuple[str, str]]:
    # Extra constants used in the firmware that are shared across all languages
    return [
        ("SymbolPlus", "+"),
        ("SymbolMinus", "-"),
        ("SymbolSpace", " "),
        ("SymbolDot", "."),
        ("SymbolDegC", "C"),
        ("SymbolDegF", "F"),
        ("SymbolMinutes", "m"),
        ("SymbolSeconds", "s"),
        ("SymbolWatts", "W"),
        ("SymbolVolts", "V"),
        ("SymbolAmps", "A"),
        ("SymbolDC", "DC"),
        ("SymbolCellCount", "S"),
        ("SymbolVersionNumber", build_version),
        ("SymbolPDDebug", "PD Debug"),
        ("SymbolState", "State"),
        ("SymbolNoVBus", "No VBus"),
        ("SymbolVBus", "VBus"),
    ]


def get_debug_menu() -> List[str]:
    return [
        datetime.today().strftime("%d-%m-%y"),
        "ID ",
        "ACC ",
        "PWR ",
        "Vin ",
        "Tip C ",
        "Han C ",
        "Max C ",
        "UpTime ",
        "Move ",
        "Tip Res",
        "Tip R ",
        "Tip O ",
        "HW G ",
        "HW M ",
        "HW P ",
        "Hall ",
    ]


def get_accel_names_list() -> List[str]:
    return [
        "Scanning",
        "None",
        "MMA8652FC",
        "LIS2DH12",
        "BMA223",
        "MSA301",
        "SC7A20",
    ]


def get_power_source_list() -> List[str]:
    return [
        "DC",
        "QC",
        "PD W. VBus",
        "PD No VBus",
    ]


def get_letter_counts(
    defs: dict, lang: dict, build_version: str
) -> Tuple[List[str], Dict[str, int]]:
    text_list = []
    # iterate over all strings
    obj = lang["menuOptions"]
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        text_list.append(obj[eid]["description"])

    obj = lang["messagesWarn"]
    for mod in defs["messagesWarn"]:
        eid = mod["id"]
        text_list.append(obj[eid]["message"])

    obj = lang["characters"]

    for mod in defs["characters"]:
        eid = mod["id"]
        text_list.append(obj[eid])

    obj = lang["menuOptions"]
    for mod in defs["menuOptions"]:
        eid = mod["id"]
        text_list.append(obj[eid]["displayText"])

    obj = lang["menuGroups"]
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        text_list.append(obj[eid]["displayText"])

    obj = lang["menuGroups"]
    for mod in defs["menuGroups"]:
        eid = mod["id"]
        text_list.append(obj[eid]["description"])
    constants = get_constants(build_version)
    for x in constants:
        text_list.append(x[1])
    text_list.extend(get_debug_menu())
    text_list.extend(get_accel_names_list())
    text_list.extend(get_power_source_list())

    # collapse all strings down into their component letters and store totals for these
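    # e.g. ["On", "Off"] yields {"O": 2, "n": 1, "f": 2}; the most frequent
    # symbols are then emitted first.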

    symbol_counts: Dict[str, int] = {}
    for line in text_list:
        line = line.replace("\n", "").replace("\r", "")
        line = line.replace("\\n", "").replace("\\r", "")
        if line:
            for letter in line:
                symbol_counts[letter] = symbol_counts.get(letter, 0) + 1
    # swap to Big -> little sort order
    symbols_by_occurrence = [
        x[0]
        for x in sorted(
            symbol_counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
        )
    ]
    return symbols_by_occurrence, symbol_counts


def get_cjk_glyph(sym: str) -> Optional[bytes]:
    try:
        glyph: Glyph = cjk_font()[ord(sym)]
    except KeyError:
        # Not present in the CJK font; callers treat None as "unavailable".
        return None

    data = glyph.data
    src_left, src_bottom, src_w, src_h = glyph.get_bounding_box()
    dst_w = 12
    dst_h = 16

    # The source data is a per-row list of ints. The first item is the bottom-
    # most row. For each row, the LSB is the right-most pixel.
    # Here, (x, y) is the coordinates with origin at the top-left.
    def get_cell(x: int, y: int) -> bool:
        # Adjust x coordinates by actual bounding box.
        adj_x = x - src_left
        if adj_x < 0 or adj_x >= src_w:
            return False
        # Adjust y coordinates by actual bounding box, then place the glyph
        # baseline 3px above the bottom edge to make it centre-ish.
        # This metric is optimized for WenQuanYi Bitmap Song 9pt and assumes
        # each glyph is to be placed in a 12x12px box.
        adj_y = y - (dst_h - src_h - src_bottom - 3)
        if adj_y < 0 or adj_y >= src_h:
            return False
        if data[src_h - adj_y - 1] & (1 << (src_w - adj_x - 1)):
            return True
        else:
            return False

    # A glyph in the font table is divided into upper and lower halves, each
    # 8px high. Each byte represents half of a column, with the LSB being the
    # top-most pixel. The data goes from the left-most to the right-most column
    # of the top half, then from the left-most to the right-most column of the
    # bottom half.
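    # For a 12x16 glyph this packing yields 2 * 12 = 24 bytes: bytes 0-11
    # hold the columns of the top half, bytes 12-23 those of the bottom half.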
    bs = bytearray()
    for block in range(2):
        for c in range(dst_w):
            b = 0
            for r in range(8):
                if get_cell(c, r + 8 * block):
                    b |= 0x01 << r
            bs.append(b)
    return bytes(bs)


def get_bytes_from_font_index(index: int) -> bytes:
    """
    Converts the font table index into its corresponding bytes
    """

    # We want to be able to use more than 254 symbols (excluding \x00 null
    # terminator and \x01 new-line) in the font table but without making all
    # the chars take 2 bytes. To do this, we use \xF1 to \xFF as lead bytes
    # to designate double-byte chars, and leave the remaining as single-byte
    # chars.
    #
    # For the sake of sanity, \x00 always means the end of string, so we skip
    # \xF1\x00 and others in the mapping.
    #
    # Mapping example:
    #
    # 0x02 => 2
    # 0x03 => 3
    # ...
    # 0xEF => 239
    # 0xF0 => 240
    # 0xF1 0x01 => 1 * 0xFF - 15 + 1 = 241
    # 0xF1 0x02 => 1 * 0xFF - 15 + 2 = 242
    # ...
    # 0xF1 0xFF => 1 * 0xFF - 15 + 255 = 495
    # 0xF2 0x01 => 2 * 0xFF - 15 + 1 = 496
    # ...
    # 0xF2 0xFF => 2 * 0xFF - 15 + 255 = 750
    # 0xF3 0x01 => 3 * 0xFF - 15 + 1 = 751
    # ...
    # 0xFF 0xFF => 15 * 0xFF - 15 + 255 = 4065
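    #
    # Round-trip examples implied by the mapping above (illustrative):
    #   get_bytes_from_font_index(2)    == b"\x02"
    #   get_bytes_from_font_index(240)  == b"\xF0"
    #   get_bytes_from_font_index(241)  == b"\xF1\x01"
    #   get_bytes_from_font_index(4065) == b"\xFF\xFF"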

    if index < 0:
        raise ValueError("index must be positive")
    page = (index + 0x0E) // 0xFF
    if page > 0x0F:
        raise ValueError("page value out of range")
    if page == 0:
        return bytes([index])
    else:
        # Into extended range
        # Leader is 0xFz where z is the page number
        # Following char is the remainder
        leader = page + 0xF0
        value = ((index + 0x0E) % 0xFF) + 0x01

        if leader > 0xFF or value > 0xFF:
            raise ValueError("value is out of range")
        return bytes([leader, value])


def bytes_to_escaped(b: bytes) -> str:
    return "".join(f"\\x{i:02X}" for i in b)


def bytes_to_c_hex(b: bytes) -> str:
    return ", ".join(f"0x{i:02X}" for i in b) + ","


@dataclass
class FontMap:
    font12: Dict[str, bytes]
    font06: Dict[str, Optional[bytes]]


@dataclass
class FontMapsPerFont:
    font12_maps: Dict[str, Dict[str, bytes]]
    font06_maps: Dict[str, Dict[str, Optional[bytes]]]
    sym_lists: Dict[str, List[str]]


def get_font_map_per_font(text_list: List[str]) -> FontMapsPerFont:
    logging.debug(f"Font map input symbols: {text_list}")
    pending_sym_set = set(text_list)
    if len(pending_sym_set) != len(text_list):
        raise ValueError("`text_list` contains duplicated symbols")

    total_symbol_count = len(text_list)
    # \x00 is for NULL termination and \x01 is for newline, so the maximum
    # number of symbols allowed is as follows (see also the comments in
    # `get_bytes_from_font_index`):
    if total_symbol_count > (0x10 * 0xFF - 15) - 2:  # 4063
        raise ValueError(
            f"Error, too many used symbols for this version (total {total_symbol_count})"
        )
    logging.info(f"Generating fonts for {total_symbol_count} symbols")

    # Collect font bitmaps by the defined font order:
    font12_maps: Dict[str, Dict[str, bytes]] = {}
    font06_maps: Dict[str, Dict[str, Optional[bytes]]] = {}
    sym_lists: Dict[str, List[str]] = {}
    for font in font_tables.ALL_FONTS:
        font12_maps[font] = {}
        font12_map = font12_maps[font]
        font06_maps[font] = {}
        font06_map = font06_maps[font]
        sym_lists[font] = []
        sym_list = sym_lists[font]

        is_cjk = font == font_tables.NAME_CJK
        font12: Dict[str, bytes]
        font06: Dict[str, bytes]
        font12, font06 = font_tables.get_font_maps_for_name(font)

        for sym in text_list:
            if sym not in pending_sym_set:
                continue
            if is_cjk:
                font12_line = get_cjk_glyph(sym)
                if font12_line is None:
                    continue
                font06_line = None
            else:
                try:
                    font12_line = font12[sym]
                    font06_line = font06[sym]
                except KeyError:
                    continue
            font12_map[sym] = font12_line
            font06_map[sym] = font06_line
            sym_list.append(sym)
            pending_sym_set.remove(sym)

    if len(pending_sym_set) > 0:
        raise KeyError(f"Symbols not found in our fonts: {pending_sym_set}")

    return FontMapsPerFont(font12_maps, font06_maps, sym_lists)


def get_forced_first_symbols() -> List[str]:
    """Get the list of symbols that must always occur at the start of the small and large fonts.

    Used by the firmware for displaying numbers and hex strings.

    Returns:
        List[str]: List of single-character strings that must be the first N entries in a font table
    """
    forced_first_symbols = [
        "0",
        "1",
        "2",
        "3",
        "4",
        "5",
        "6",
        "7",
        "8",
        "9",
        "a",
        "b",
        "c",
        "d",
        "e",
        "f",
    ]
    return forced_first_symbols


def get_sym_list_and_font_map(
    text_list: List[str],
) -> Tuple[List[str], Dict[str, List[str]], FontMap]:
    font_maps = get_font_map_per_font(text_list)
    font12_maps = font_maps.font12_maps
    font06_maps = font_maps.font06_maps

    # Build the full font maps
    font12_map = {}
    font06_map = {}
    for font in font_tables.ALL_FONTS:
        font12_map.update(font12_maps[font])
        font06_map.update(font06_maps[font])

    # Collect all symbols in the original symbol order, but make sure that
    # symbols which only have a large font are placed after all symbols that
    # have both small and large fonts.
    sym_list_both_fonts = []
    sym_list_large_only = []
    for sym in text_list:
        if font06_map[sym] is None:
            sym_list_large_only.append(sym)
        else:
            sym_list_both_fonts.append(sym)
    sym_list = sym_list_both_fonts + sym_list_large_only

    return sym_list, font_maps.sym_lists, FontMap(font12_map, font06_map)


def build_symbol_conversion_map(sym_list: List[str]) -> Dict[str, bytes]:
    forced_first_symbols = get_forced_first_symbols()
    if sym_list[: len(forced_first_symbols)] != forced_first_symbols:
        raise ValueError("Symbol list does not start with forced_first_symbols.")

    # The symbol list is already sorted; allocate numeric codes in that order.
    symbol_map: Dict[str, bytes] = {"\n": bytes([1])}
    index = 2  # start at 2: 0 is the null terminator, 1 is the newline
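    # With the forced-first ordering above, "0" therefore maps to b"\x02",
    # "1" to b"\x03", ..., "f" to b"\x11"; symbols whose index exceeds 240
    # get the two-byte sequences produced by get_bytes_from_font_index().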

    # Assign symbol bytes by font index
    for index, sym in enumerate(sym_list, index):
        assert sym not in symbol_map
        symbol_map[sym] = get_bytes_from_font_index(index)

    return symbol_map


def make_font_table_cpp(
    sym_list: List[str], font_map: FontMap, symbol_map: Dict[str, bytes]
) -> str:
    output_table = make_font_table_named_cpp(
        "USER_FONT_12", sym_list, font_map.font12, symbol_map
    )
    output_table += make_font_table_06_cpp(sym_list, font_map, symbol_map)
    return output_table


def make_font_table_named_cpp(
    name: Optional[str],
    sym_list: List[str],
    font_map: Dict[str, bytes],
    symbol_map: Dict[str, bytes],
) -> str:
    output_table = ""
    if name:
        output_table = f"const uint8_t {name}[] = {{\n"
    for sym in sym_list:
        output_table += f"{bytes_to_c_hex(font_map[sym])}//{bytes_to_escaped(symbol_map[sym])} -> {sym}\n"
    if name:
        output_table += f"}}; // {name}\n"
    return output_table


def make_font_table_06_cpp(
    sym_list: List[str], font_map: FontMap, symbol_map: Dict[str, bytes]
) -> str:
    output_table = "const uint8_t USER_FONT_6x8[] = {\n"
    for sym in sym_list:
        font_bytes = font_map.font06[sym]
        if font_bytes:
            font_line = bytes_to_c_hex(font_bytes)
        else:
            font_line = "// "  # placeholder
        output_table += f"{font_line}//{bytes_to_escaped(symbol_map[sym])} -> {sym}\n"
    output_table += "};\n"
    return output_table


def convert_string_bytes(symbol_conversion_table: Dict[str, bytes], text: str) -> bytes:
    # convert all of the symbols from the string into bytes for their content
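    # e.g. with a table mapping {"\n": b"\x01", "O": b"\x05", "K": b"\x06"},
    # the text "OK\nOK" becomes b"\x05\x06\x01\x05\x06"; literal "\n"
    # escapes from the JSON source are converted to real newlines first.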
    output_string = b""
    for c in text.replace("\\r", "").replace("\\n", "\n"):
        if c not in symbol_conversion_table:
            logging.error(f"Missing font definition for {c}")
            sys.exit(1)
        else:
            output_string += symbol_conversion_table[c]
    return output_string


def convert_string(symbol_conversion_table: Dict[str, bytes], text: str) -> str:
    # convert all of the symbols from the string into escapes for their content
    return bytes_to_escaped(convert_string_bytes(symbol_conversion_table, text))


def escape(string: str) -> str:
    return json.dumps(string, ensure_ascii=False)


def write_bytes_as_c_array(
    f: TextIO, name: str, data: bytes, indent: int = 2, bytes_per_line: int = 16
) -> None:
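    """Write `data` out as a named C byte-array definition, e.g. (shape only):

        const uint8_t name[] = {
          0x12, 0x34, ...
        }; // name
    """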
    f.write(f"const uint8_t {name}[] = {{\n")
    for i in range(0, len(data), bytes_per_line):
        f.write(" " * indent)
        f.write(", ".join(f"0x{b:02X}" for b in data[i : i + bytes_per_line]))
        f.write(",\n")
    f.write(f"}}; // {name}\n\n")


@dataclass
class LanguageData:
    langs: List[dict]
    defs: dict
    build_version: str
    sym_list: List[str]
    sym_lists_by_font: Dict[str, List[str]]
    font_map: FontMap


def prepare_language(lang: dict, defs: dict, build_version: str) -> LanguageData:
    language_code: str = lang["languageCode"]
    logging.info(f"Preparing language data for {language_code}")
    # Iterate over all of the text to build up the symbols & counts
    text_list, _ = get_letter_counts(defs, lang, build_version)
    # From the letter counts, build a symbol translator & write out the font

    forced_first_symbols = get_forced_first_symbols()

    # We enforce that numbers come first.
    text_list = forced_first_symbols + [
        x for x in text_list if x not in forced_first_symbols
    ]

    sym_list, sym_lists_by_font, font_map = get_sym_list_and_font_map(text_list)
    return LanguageData(
        [lang], defs, build_version, sym_list, sym_lists_by_font, font_map
    )


def prepare_languages(
    langs: List[dict], defs: dict, build_version: str
) -> LanguageData:
    language_codes: List[str] = [lang["languageCode"] for lang in langs]
    logging.info(f"Preparing language data for {language_codes}")

    forced_first_symbols = get_forced_first_symbols()

    # Build the full font maps
    font12_map = {}
    font06_map = {}
    # Calculate total symbol counts per font:
    total_sym_counts: Dict[str, Dict[str, int]] = {}
    for lang in langs:
        text_list, sym_counts = get_letter_counts(defs, lang, build_version)
        text_list = forced_first_symbols + [
            x for x in text_list if x not in forced_first_symbols
        ]
        font_maps = get_font_map_per_font(text_list)
        for font in font_tables.ALL_FONTS:
            font12_map.update(font_maps.font12_maps[font])
            font06_map.update(font_maps.font06_maps[font])
        for font, font_sym_list in font_maps.sym_lists.items():
            font_total_sym_counts = total_sym_counts.get(font, {})
            for sym in font_sym_list:
                font_total_sym_counts[sym] = font_total_sym_counts.get(
                    sym, 0
                ) + sym_counts.get(sym, 0)
            total_sym_counts[font] = font_total_sym_counts

    sym_lists_by_font: Dict[str, List[str]] = {}
    combined_sym_list = []
    for font in font_tables.ALL_FONTS:
        if font not in total_sym_counts:
            continue
        # swap to Big -> little sort order
        current_sym_list = [
            x[0]
            for x in sorted(
                total_sym_counts[font].items(),
                key=lambda kv: (kv[1], kv[0]),
                reverse=True,
            )
        ]
        if font == font_tables.NAME_ASCII_BASIC:
            # We enforce that numbers come first.
            current_sym_list = forced_first_symbols + [
                x for x in current_sym_list if x not in forced_first_symbols
            ]
        sym_lists_by_font[font] = current_sym_list
        combined_sym_list.extend(current_sym_list)

    return LanguageData(
        langs,
        defs,
        build_version,
        combined_sym_list,
        sym_lists_by_font,
        FontMap(font12_map, font06_map),
    )


def write_language(
    data: LanguageData,
    f: TextIO,
    strings_bin: Optional[bytes] = None,
    compress_font: bool = False,
) -> None:
    if len(data.langs) > 1:
        raise ValueError("More than one language was provided")
    lang = data.langs[0]
    defs = data.defs
    build_version = data.build_version
    sym_list = data.sym_list
    font_map = data.font_map

    symbol_conversion_table = build_symbol_conversion_map(sym_list)

    language_code: str = lang["languageCode"]
    logging.info(f"Generating block for {language_code}")

    try:
        lang_name = lang["languageLocalName"]
    except KeyError:
        lang_name = language_code

    if strings_bin or compress_font:
        f.write('#include "brieflz.h"\n')

    f.write(f"\n// ---- {lang_name} ----\n\n")

    if not compress_font:
        font_table_text = make_font_table_cpp(
            sym_list, font_map, symbol_conversion_table
        )
        f.write(font_table_text)
        f.write(
            "const FontSection FontSectionsData[] = {\n"
            "  {\n"
            "    .symbol_start = 2,\n"
            f"    .symbol_end = {len(sym_list) + 2},\n"
            "    .font12_start_ptr = USER_FONT_12,\n"
            "    .font06_start_ptr = USER_FONT_6x8,\n"
            "  },\n"
            "};\n"
            "const FontSection *const FontSections = FontSectionsData;\n"
            "const uint8_t FontSectionsCount = sizeof(FontSectionsData) / sizeof(FontSectionsData[0]);\n"
        )
    else:
        font12_uncompressed = bytearray()
        for sym in sym_list:
            font12_uncompressed.extend(font_map.font12[sym])
        font12_compressed = brieflz.compress(bytes(font12_uncompressed))
        logging.info(
            f"Font table 12x16 compressed from {len(font12_uncompressed)} to {len(font12_compressed)} bytes (ratio {len(font12_compressed) / len(font12_uncompressed):.3})"
        )
        write_bytes_as_c_array(f, "font_12x16_brieflz", font12_compressed)
        font_table_text = make_font_table_06_cpp(
            sym_list, font_map, symbol_conversion_table
        )
        f.write(font_table_text)
        f.write(
            f"static uint8_t font_out_buffer[{len(font12_uncompressed)}];\n"
            "const FontSection FontSectionsData[] = {\n"
            "  {\n"
            "    .symbol_start = 2,\n"
            f"    .symbol_end = {len(sym_list) + 2},\n"
            "    .font12_start_ptr = font_out_buffer,\n"
            "    .font06_start_ptr = USER_FONT_6x8,\n"
            "  },\n"
            "};\n"
            "const FontSection *const FontSections = FontSectionsData;\n"
            "const uint8_t FontSectionsCount = sizeof(FontSectionsData) / sizeof(FontSectionsData[0]);\n"
        )

    f.write(f"\n// ---- {lang_name} ----\n\n")

    translation_common_text = get_translation_common_text(
        defs, symbol_conversion_table, build_version
    )
    f.write(translation_common_text)
    f.write(
        f"const bool HasFahrenheit = {'true' if lang.get('tempUnitFahrenheit', True) else 'false'};\n\n"
    )

    if not strings_bin:
        translation_strings_and_indices_text = get_translation_strings_and_indices_text(
            lang, defs, symbol_conversion_table
        )
        f.write(translation_strings_and_indices_text)
        f.write(
            "const TranslationIndexTable *Tr = &translation.indices;\n"
            "const char *TranslationStrings = translation.strings;\n\n"
        )
    else:
        compressed = brieflz.compress(strings_bin)
        logging.info(
            f"Strings compressed from {len(strings_bin)} to {len(compressed)} bytes (ratio {len(compressed) / len(strings_bin):.3})"
        )
        write_bytes_as_c_array(f, "translation_data_brieflz", compressed)
        f.write(
            f"static uint8_t translation_data_out_buffer[{len(strings_bin)}] __attribute__((__aligned__(2)));\n\n"
            "const TranslationIndexTable *Tr = reinterpret_cast<const TranslationIndexTable *>(translation_data_out_buffer);\n"
            "const char *TranslationStrings = reinterpret_cast<const char *>(translation_data_out_buffer) + sizeof(TranslationIndexTable);\n\n"
        )

    if not strings_bin and not compress_font:
        f.write("void prepareTranslations() {}\n\n")
    else:
        f.write("void prepareTranslations() {\n")
        if compress_font:
            f.write(
                "  blz_depack_srcsize(font_12x16_brieflz, font_out_buffer, sizeof(font_12x16_brieflz));\n"
            )
        if strings_bin:
            f.write(
                "  blz_depack_srcsize(translation_data_brieflz, translation_data_out_buffer, sizeof(translation_data_brieflz));\n"
            )
        f.write("}\n\n")

    sanity_checks_text = get_translation_sanity_checks_text(defs)
    f.write(sanity_checks_text)


def write_languages(
    data: LanguageData,
    f: TextIO,
    strings_obj_path: Optional[str] = None,
    compress_font: bool = False,
) -> None:
    defs = data.defs
    build_version = data.build_version
    combined_sym_list = data.sym_list
    sym_lists_by_font = data.sym_lists_by_font
    font_map = data.font_map

    symbol_conversion_table = build_symbol_conversion_map(combined_sym_list)

    language_codes: List[str] = [lang["languageCode"] for lang in data.langs]
    logging.info(f"Generating block for {language_codes}")

    lang_names = [
        lang.get("languageLocalName", lang["languageCode"]) for lang in data.langs
    ]

    f.write('#include "Translation_multi.h"')

    f.write(f"\n// ---- {lang_names} ----\n\n")

    max_decompressed_font_size = 0
    if not compress_font:
        font_table_text = ""
        font_section_info_text = (
            "const FontSectionDataInfo FontSectionDataInfos[] = {\n"
        )
        for font, current_sym_list in sym_lists_by_font.items():
            font_table_text += f"const uint8_t font_table_data_{font}[] = {{\n"
            font_table_text += "// 12x16:\n"
            font_table_text += make_font_table_named_cpp(
                None,
                current_sym_list,
                font_map.font12,
                symbol_conversion_table,
            )
            if font != font_tables.NAME_CJK:
                font_table_text += "// 6x8:\n"
                font_table_text += make_font_table_named_cpp(
                    None,
                    current_sym_list,
                    font_map.font06,  # type: ignore[arg-type]
                    symbol_conversion_table,
                )
            font_table_text += f"}}; // font_table_data_{font}\n"
            if len(current_sym_list) == 0:
                current_sym_start = 0
            else:
                current_sym_start = combined_sym_list.index(current_sym_list[0]) + 2
            font_section_info_text += (
                "  {\n"
                f"    .symbol_start = {current_sym_start},\n"
                f"    .symbol_count = {len(current_sym_list)},\n"
                f"    .data_size = sizeof(font_table_data_{font}),\n"
                "    .data_is_compressed = false,\n"
                f"    .data_ptr = font_table_data_{font},\n"
                "  },\n"
            )

        f.write(font_table_text)
        font_section_info_text += (
            "};\n"
            "const uint8_t FontSectionDataCount = sizeof(FontSectionDataInfos) / sizeof(FontSectionDataInfos[0]);\n\n"
        )
        f.write(font_section_info_text)
        f.write(
            "FontSection DynamicFontSections[4] = {};\n"
            "const FontSection *const FontSections = DynamicFontSections;\n"
            "const uint8_t FontSectionsCount = sizeof(DynamicFontSections) / sizeof(DynamicFontSections[0]);\n"
        )
    else:
        font_section_info_text = (
            "const FontSectionDataInfo FontSectionDataInfos[] = {\n"
        )
        for font, current_sym_list in sym_lists_by_font.items():
            logging.debug(f"Font {font}: {current_sym_list}")
            if len(current_sym_list) == 0:
                continue
            current_sym_start = combined_sym_list.index(current_sym_list[0]) + 2
            font_uncompressed = bytearray()
            for sym in current_sym_list:
                font_uncompressed.extend(font_map.font12[sym])
            if font != font_tables.NAME_CJK:
                for sym in current_sym_list:
                    font_uncompressed.extend(font_map.font06[sym])  # type: ignore[arg-type]
            font_compressed = brieflz.compress(bytes(font_uncompressed))
            logging.info(
                f"Font table for {font} compressed from {len(font_uncompressed)} to {len(font_compressed)} bytes (ratio {len(font_compressed) / len(font_uncompressed):.3})"
            )
            max_decompressed_font_size += len(font_uncompressed)
            write_bytes_as_c_array(f, f"font_data_brieflz_{font}", font_compressed)
            font_section_info_text += (
                "  {\n"
                f"    .symbol_start = {current_sym_start},\n"
                f"    .symbol_count = {len(current_sym_list)},\n"
                f"    .data_size = sizeof(font_data_brieflz_{font}),\n"
                "    .data_is_compressed = true,\n"
                f"    .data_ptr = font_data_brieflz_{font},\n"
                "  },\n"
            )
        font_section_info_text += (
            "};\n"
            "const uint8_t FontSectionDataCount = sizeof(FontSectionDataInfos) / sizeof(FontSectionDataInfos[0]);\n\n"
        )
        f.write(font_section_info_text)
        f.write(
            "FontSection DynamicFontSections[4] = {};\n"
            "const FontSection *const FontSections = DynamicFontSections;\n"
            "const uint8_t FontSectionsCount = sizeof(DynamicFontSections) / sizeof(DynamicFontSections[0]);\n"
        )

    f.write(f"\n// ---- {lang_names} ----\n\n")

    translation_common_text = get_translation_common_text(
        defs, symbol_conversion_table, build_version
    )
    f.write(translation_common_text)
    f.write(
        f"const bool HasFahrenheit = {'true' if any(lang.get('tempUnitFahrenheit', True) for lang in data.langs) else 'false'};\n\n"
    )

    max_decompressed_translation_size = 0
    if not strings_obj_path:
        for lang in data.langs:
            lang_code = lang["languageCode"]
            translation_strings_and_indices_text = (
                get_translation_strings_and_indices_text(
                    lang, defs, symbol_conversion_table, suffix=f"_{lang_code}"
                )
            )
            f.write(translation_strings_and_indices_text)
        f.write("const LanguageMeta LanguageMetas[] = {\n")
        for lang in data.langs:
            lang_code = lang["languageCode"]
            lang_id = get_language_unique_id(lang_code)
            f.write(
                "  {\n"
                f"    .uniqueID = {lang_id},\n"
                f"    .translation_data = reinterpret_cast<const uint8_t *>(&translation_{lang_code}),\n"
                f"    .translation_size = sizeof(translation_{lang_code}),\n"
                "    .translation_is_compressed = false,\n"
                "  },\n"
            )
        f.write("};\n")
    else:
        for lang in data.langs:
            lang_code = lang["languageCode"]
            sym_name = objcopy.cpp_var_to_section_name(f"translation_{lang_code}")
            strings_bin = objcopy.get_binary_from_obj(strings_obj_path, sym_name)
            if len(strings_bin) == 0:
                raise ValueError(f"Output for {sym_name} is empty")
            max_decompressed_translation_size = max(
                max_decompressed_translation_size, len(strings_bin)
            )
            compressed = brieflz.compress(strings_bin)
            logging.info(
                f"Strings for {lang_code} compressed from {len(strings_bin)} to {len(compressed)} bytes (ratio {len(compressed) / len(strings_bin):.3})"
            )
            write_bytes_as_c_array(
                f, f"translation_data_brieflz_{lang_code}", compressed
            )
        f.write("const LanguageMeta LanguageMetas[] = {\n")
        for lang in data.langs:
            lang_code = lang["languageCode"]
            lang_id = get_language_unique_id(lang_code)
            f.write(
                "  {\n"
                f"    .uniqueID = {lang_id},\n"
                f"    .translation_data = translation_data_brieflz_{lang_code},\n"
                f"    .translation_size = sizeof(translation_data_brieflz_{lang_code}),\n"
                "    .translation_is_compressed = true,\n"
                "  },\n"
            )
        f.write("};\n")
    f.write(
        "const uint8_t LanguageCount = sizeof(LanguageMetas) / sizeof(LanguageMetas[0]);\n\n"
        f"alignas(TranslationData) uint8_t translation_data_out_buffer[{max_decompressed_translation_size + max_decompressed_font_size}];\n"
        "const uint16_t translation_data_out_buffer_size = sizeof(translation_data_out_buffer);\n\n"
    )

    sanity_checks_text = get_translation_sanity_checks_text(defs)
    f.write(sanity_checks_text)


def get_translation_common_text(
    defs: dict, symbol_conversion_table: Dict[str, bytes], build_version
) -> str:
    translation_common_text = ""

    # Write out firmware constant options
    constants = get_constants(build_version)
    for x in constants:
        translation_common_text += f'const char* {x[0]} = "{convert_string(symbol_conversion_table, x[1])}";//{x[1]} \n'
    translation_common_text += "\n"

    # Debug Menu
    translation_common_text += "const char* DebugMenu[] = {\n"

    for c in get_debug_menu():
        translation_common_text += (
            f'\t "{convert_string(symbol_conversion_table, c)}",//{c} \n'
        )
    translation_common_text += "};\n\n"

    # accel names
    translation_common_text += "const char* AccelTypeNames[] = {\n"

    for c in get_accel_names_list():
        translation_common_text += (
            f'\t "{convert_string(symbol_conversion_table, c)}",//{c} \n'
        )
    translation_common_text += "};\n\n"

    # power source types
    translation_common_text += "const char* PowerSourceNames[] = {\n"

    for c in get_power_source_list():
        translation_common_text += (
            f'\t "{convert_string(symbol_conversion_table, c)}",//{c} \n'
        )
    translation_common_text += "};\n\n"

    return translation_common_text


@dataclass
class TranslationItem:
    info: str
    str_index: int


def get_translation_strings_and_indices_text(
    lang: dict, defs: dict, symbol_conversion_table: Dict[str, bytes], suffix: str = ""
) -> str:
    str_table: List[str] = []
    str_group_messages: List[TranslationItem] = []
    str_group_messageswarn: List[TranslationItem] = []
    str_group_characters: List[TranslationItem] = []
    str_group_settingdesc: List[TranslationItem] = []
    str_group_settingshortnames: List[TranslationItem] = []
    str_group_settingmenuentries: List[TranslationItem] = []
    str_group_settingmenuentriesdesc: List[TranslationItem] = []

    eid: str

    # ----- Reading SettingsDescriptions
    obj = lang["menuOptions"]

    for index, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        str_group_settingdesc.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table))
        )
        str_table.append(obj[eid]["description"])

    # ----- Reading Message strings

    obj = lang["messagesWarn"]

    for mod in defs["messagesWarn"]:
        eid = mod["id"]
        source_text = obj[eid]["message"]
        if "\n" not in source_text:
            source_text = "\n" + source_text

        str_group_messageswarn.append(TranslationItem(eid, len(str_table)))
        str_table.append(source_text)

    # ----- Reading Characters

    obj = lang["characters"]

    for mod in defs["characters"]:
        eid = mod["id"]
        str_group_characters.append(TranslationItem(eid, len(str_table)))
        str_table.append(obj[eid])

    # ----- Reading SettingsShortNames
    obj = lang["menuOptions"]

    for index, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]

        source_text = obj[eid]["displayText"]

        if "\n" not in source_text:
            source_text = "\n" + source_text
        str_group_settingshortnames.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table))
        )
        str_table.append(source_text)

    # ----- Reading Menu Groups
    obj = lang["menuGroups"]

    for index, mod in enumerate(defs["menuGroups"]):
        eid = mod["id"]
        source_text = obj[eid]["displayText"]

        if "\n" not in source_text:
            source_text = "\n" + source_text
        str_group_settingmenuentries.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table))
        )
        str_table.append(source_text)

    # ----- Reading Menu Groups Descriptions
    obj = lang["menuGroups"]

    for index, mod in enumerate(defs["menuGroups"]):
        eid = mod["id"]
        str_group_settingmenuentriesdesc.append(
            TranslationItem(f"[{index:02d}] {eid}", len(str_table))
        )
        str_table.append(obj[eid]["description"])

    @dataclass
    class RemappedTranslationItem:
        str_index: int
        str_start_offset: int = 0

    # ----- Perform suffix merging optimization:
    #
    # We sort the reversed strings so that strings sharing a suffix end up
    # next to each other, e.g.:
    # "ef\0",
    # "cdef\0",
    # "abcdef\0",
    backward_sorted_table: List[Tuple[int, str, bytes]] = sorted(
        (
            (i, s, bytes(reversed(convert_string_bytes(symbol_conversion_table, s))))
            for i, s in enumerate(str_table)
        ),
        key=lambda x: x[2],
    )
    str_remapping: List[Optional[RemappedTranslationItem]] = [None] * len(str_table)
    for i, (str_index, source_str, converted) in enumerate(backward_sorted_table[:-1]):
        j = i
        while backward_sorted_table[j + 1][2].startswith(converted):
            j += 1
            if j + 1 == len(backward_sorted_table):
                break
        if j != i:
            str_remapping[str_index] = RemappedTranslationItem(
                str_index=backward_sorted_table[j][0],
                str_start_offset=len(backward_sorted_table[j][2]) - len(converted),
            )

    # ----- Write the string table:
    str_offsets = [-1] * len(str_table)
    offset = 0
    write_null = False
    # NOTE: Cannot specify a C99 designator here due to GCC (g++) bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55227
    translation_strings_text = "  /* .strings = */ {\n"
    for i, source_str in enumerate(str_table):
        if str_remapping[i] is not None:
            continue
        if write_null:
            translation_strings_text += '    "\\0"\n'
        write_null = True
        # Find what items use this string
        str_used_by = [i] + [
            j for j, r in enumerate(str_remapping) if r and r.str_index == i
        ]
        for j in str_used_by:
            for group, pre_info in [
                (str_group_messages, "messages"),
                (str_group_messageswarn, "messagesWarn"),
                (str_group_characters, "characters"),
                (str_group_settingdesc, "SettingsDescriptions"),
                (str_group_settingshortnames, "SettingsShortNames"),
                (str_group_settingmenuentries, "SettingsMenuEntries"),
                (str_group_settingmenuentriesdesc, "SettingsMenuEntriesDescriptions"),
            ]:
                for item in group:
                    if item.str_index == j:
                        translation_strings_text += (
                            f"    // - {pre_info} {item.info}\n"
                        )
            if j == i:
                translation_strings_text += (
                    f"    // {offset: >4}: {escape(source_str)}\n"
                )
                str_offsets[j] = offset
            else:
                remapped = str_remapping[j]
                assert remapped is not None
                translation_strings_text += f"    // {offset + remapped.str_start_offset: >4}: {escape(str_table[j])}\n"
                str_offsets[j] = offset + remapped.str_start_offset
        converted_bytes = convert_string_bytes(symbol_conversion_table, source_str)
        translation_strings_text += f'    "{bytes_to_escaped(converted_bytes)}"'
        str_offsets[i] = offset
        # Account for the string's bytes and its null terminator
        offset += len(converted_bytes) + 1
    translation_strings_text += "\n  }, // .strings\n\n"

    str_total_bytes = offset

    def get_offset(idx: int) -> int:
        assert str_offsets[idx] >= 0
        return str_offsets[idx]

    translation_indices_text = "  .indices = {\n"

    # ----- Write the messages string indices:
    for group in [str_group_messages, str_group_messageswarn, str_group_characters]:
        for item in group:
            translation_indices_text += f"    .{item.info} = {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
        translation_indices_text += "\n"

    # ----- Write the settings index tables:
    for group, name in [
        (str_group_settingdesc, "SettingsDescriptions"),
        (str_group_settingshortnames, "SettingsShortNames"),
        (str_group_settingmenuentries, "SettingsMenuEntries"),
        (str_group_settingmenuentriesdesc, "SettingsMenuEntriesDescriptions"),
    ]:
        max_len = 30
        translation_indices_text += f"    .{name} = {{\n"
        for item in group:
            translation_indices_text += f"      /* {item.info.ljust(max_len)[:max_len]} */ {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
        translation_indices_text += f"    }}, // {name}\n\n"

    translation_indices_text += "  }, // .indices\n\n"

    return (
        "struct {\n"
        "  TranslationIndexTable indices;\n"
        f"  char strings[{str_total_bytes}];\n"
        f"}} const translation{suffix} = {{\n"
        + translation_indices_text
        + translation_strings_text
        + f"}}; // translation{suffix}\n\n"
    )


def get_translation_sanity_checks_text(defs: dict) -> str:
    sanity_checks_text = "\n// Verify SettingsItemIndex values:\n"
    for i, mod in enumerate(defs["menuOptions"]):
        eid = mod["id"]
        sanity_checks_text += (
            f"static_assert(static_cast<uint8_t>(SettingsItemIndex::{eid}) == {i});\n"
        )
    sanity_checks_text += f"static_assert(static_cast<uint8_t>(SettingsItemIndex::NUM_ITEMS) == {len(defs['menuOptions'])});\n"
    return sanity_checks_text


def read_version() -> str:
    with open(HERE.parent / "source" / "version.h") as version_file:
        for line in version_file:
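            # Match a '#define BUILD_VERSION "..."' line and pull out the
            # quoted value.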
            if re.findall(r"^.*(?<=(#define)).*(?<=(BUILD_VERSION))", line):
                matches = re.findall(r"\"(.+?)\"", line)
                if matches:
                    version = matches[0]
                    try:
                        version += f".{subprocess.check_output(['git', 'rev-parse', '--short=7', 'HEAD']).strip().decode('ascii').upper()}"
                    # --short=7: the shortened hash with 7 digits. Increase/decrease if needed!
                    except OSError:
                        version += " git"
    return version


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--output-pickled",
        help="Write pickled language data for later reuse",
        type=argparse.FileType("wb"),
        required=False,
        dest="output_pickled",
    )
    parser.add_argument(
        "--input-pickled",
        help="Use previously generated pickled language data",
        type=argparse.FileType("rb"),
        required=False,
        dest="input_pickled",
    )
    parser.add_argument(
        "--strings-obj",
        help="Use generated TranslationData by extracting it from the object file",
        type=argparse.FileType("rb"),
        required=False,
        dest="strings_obj",
    )
    parser.add_argument(
        "--compress-font",
        help="Compress the font table",
        action="store_true",
        required=False,
        dest="compress_font",
    )
    parser.add_argument(
        "--output", "-o", help="Target file", type=argparse.FileType("w"), required=True
    )
    parser.add_argument(
        "languageCodes",
        metavar="languageCode",
        nargs="+",
        help="Language(s) to generate",
    )
    return parser.parse_args()


def main() -> None:
    json_dir = HERE

    args = parse_args()
    if args.input_pickled and args.output_pickled:
        logging.error("error: Both --output-pickled and --input-pickled are specified")
        sys.exit(1)

    language_data: LanguageData
    if args.input_pickled:
        logging.info(f"Reading pickled language data from {args.input_pickled.name}...")
        language_data = pickle.load(args.input_pickled)
        language_codes = [lang["languageCode"] for lang in language_data.langs]
        if language_codes != args.languageCodes:
            logging.error(
                f"error: languageCodes {args.languageCodes} do not match language data {language_codes}"
            )
            sys.exit(1)
        logging.info(f"Read language data for {language_codes}")
        logging.info(f"Build version: {language_data.build_version}")
    else:
        try:
            build_version = read_version()
        except FileNotFoundError:
            logging.error("error: Could not find version info")
            sys.exit(1)

        logging.info(f"Build version: {build_version}")
        logging.info(f"Making {args.languageCodes} from {json_dir}")

        defs_ = load_json(os.path.join(json_dir, "translations_definitions.json"))
        if len(args.languageCodes) == 1:
            lang_ = read_translation(json_dir, args.languageCodes[0])
            language_data = prepare_language(lang_, defs_, build_version)
        else:
            langs_ = [
                read_translation(json_dir, lang_code)
                for lang_code in args.languageCodes
            ]
            language_data = prepare_languages(langs_, defs_, build_version)

    out_ = args.output
    write_start(out_)
    if len(language_data.langs) == 1:
        if args.strings_obj:
            sym_name = objcopy.cpp_var_to_section_name("translation")
            strings_bin = objcopy.get_binary_from_obj(args.strings_obj.name, sym_name)
            if len(strings_bin) == 0:
                raise ValueError(f"Output for {sym_name} is empty")
            write_language(
                language_data,
                out_,
                strings_bin=strings_bin,
                compress_font=args.compress_font,
            )
        else:
            write_language(language_data, out_, compress_font=args.compress_font)
    else:
        if args.strings_obj:
            write_languages(
                language_data,
                out_,
                strings_obj_path=args.strings_obj.name,
                compress_font=args.compress_font,
            )
        else:
            write_languages(language_data, out_, compress_font=args.compress_font)

    if args.output_pickled:
        logging.info(f"Writing pickled data to {args.output_pickled.name}")
        pickle.dump(language_data, args.output_pickled)

    logging.info("Done")


if __name__ == "__main__":
    main()