mirror of
https://github.com/Ralim/IronOS.git
synced 2025-02-26 07:53:55 +00:00
[RFC] Multi-language firmware (second try) (#941)
* Impl. sectioned font table in firmware * make_translation.py: Extract build_symbol_conversion_table function * Put translation indices and strings in a struct * Move translation objcopy step to Python * Impl. multi-language firmware demo * Impl. strings-compressed multi-lang firmware demo * Add font compression to multi-lang demo * Refactor Makefile a bit * Fix rules for make < 4.3 * Add more multi-lang groups * Add Pinecil multi-lang CI build * Add lzfx compression license text * Remote multi-language demo group * Fix build after merge * Import code from BriefLZ * Change brieflz for our use case * Change compression to use brieflz * Remove lzfx code * Update license file for brieflz * Exclude brieflz files from format check * Add BriefLZ test
This commit is contained in:
189
Translations/brieflz.py
Normal file
189
Translations/brieflz.py
Normal file
@@ -0,0 +1,189 @@
|
||||
import ctypes
|
||||
import functools
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# Directory containing this script; used to locate the compiled shared library.
HERE = Path(__file__).resolve().parent
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _libbrieflz():
    """Load (at most once) and return the brieflz shared library handle."""
    lib_path = os.path.join(HERE, "../source/Objects/host/brieflz/libbrieflz.so")
    return ctypes.cdll.LoadLibrary(lib_path)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _fn_blz_max_packed_size():
    """Returns the blz_max_packed_size C function.

    ::

        /**
         * Get bound on compressed data size.
         *
         * @see blz_pack
         *
         * @param src_size   number of bytes to compress
         * @return maximum size of compressed data
         */
        BLZ_API size_t
        blz_max_packed_size(size_t src_size);
    """

    fn = _libbrieflz().blz_max_packed_size
    # Fix: the ctypes attribute is `argtypes` (plural); the previous
    # `fn.argtype = [...]` assignment was silently ignored by ctypes.
    fn.argtypes = [
        ctypes.c_size_t,
    ]
    fn.restype = ctypes.c_size_t
    return fn
|
||||
|
||||
|
||||
def blz_max_packed_size(src_size: int) -> int:
    """Get bound on compressed data size."""
    return int(_fn_blz_max_packed_size()(src_size))
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _fn_blz_workmem_size_level():
    """Returns the blz_workmem_size_level C function.

    ::

        /**
         * Get required size of `workmem` buffer.
         *
         * @see blz_pack_level
         *
         * @param src_size   number of bytes to compress
         * @param level      compression level
         * @return required size in bytes of `workmem` buffer
         */
        BLZ_API size_t
        blz_workmem_size_level(size_t src_size, int level);
    """

    fn = _libbrieflz().blz_workmem_size_level
    # Fix: the ctypes attribute is `argtypes` (plural); the previous
    # `fn.argtype = [...]` assignment was silently ignored by ctypes.
    fn.argtypes = [
        ctypes.c_size_t,
        ctypes.c_int,
    ]
    fn.restype = ctypes.c_size_t
    return fn
|
||||
|
||||
|
||||
def blz_workmem_size_level(src_size: int, level: int) -> int:
    """Get required size of `workmem` buffer."""
    return int(_fn_blz_workmem_size_level()(src_size, level))
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _fn_blz_pack_level():
    """Returns the blz_pack_level C function.

    ::

        /**
         * Compress `src_size` bytes of data from `src` to `dst`.
         *
         * Compression levels between 1 and 9 offer a trade-off between
         * time/space and ratio. Level 10 is optimal but very slow.
         *
         * @param src       pointer to data
         * @param dst       pointer to where to place compressed data
         * @param src_size  number of bytes to compress
         * @param workmem   pointer to memory for temporary use
         * @param level     compression level
         * @return size of compressed data
         */
        BLZ_API unsigned long
        blz_pack_level(const void *src, void *dst, unsigned long src_size,
                       void *workmem, int level);
    """

    fn = _libbrieflz().blz_pack_level
    # Fix: the ctypes attribute is `argtypes` (plural); the previous
    # `fn.argtype = [...]` assignment was silently ignored by ctypes.
    fn.argtypes = [
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.c_ulong,
        ctypes.c_char_p,
        ctypes.c_int,
    ]
    fn.restype = ctypes.c_ulong
    return fn
|
||||
|
||||
|
||||
def compress(data: bytes) -> bytes:
    """Returns a bytes object of the brieflz-compressed data."""
    level = 10  # level 10 is optimal but very slow (see blz_pack_level docs)
    pack = _fn_blz_pack_level()
    dst = ctypes.create_string_buffer(blz_max_packed_size(len(data)))
    workmem = ctypes.create_string_buffer(blz_workmem_size_level(len(data), level))

    packed_size = pack(data, dst, len(data), workmem, level)
    if not packed_size:
        # brieflz signals failure with a zero packed size
        raise BriefLZError()
    return bytes(dst[:packed_size])  # type: ignore
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _fn_blz_depack_srcsize():
    """Returns the blz_depack_srcsize C function.

    ::

        /**
         * Decompress `src_size` bytes of data from `src` to `dst`.
         *
         * This function is unsafe. If the provided data is malformed, it may
         * read more than `src_size` from the `src` buffer.
         *
         * @param src       pointer to compressed data
         * @param dst       pointer to where to place decompressed data
         * @param src_size  size of the compressed data
         * @return size of decompressed data
         */
        BLZ_API unsigned long
        blz_depack_srcsize(const void *src, void *dst, unsigned long src_size);
    """

    fn = _libbrieflz().blz_depack_srcsize
    # Fix: the ctypes attribute is `argtypes` (plural); the previous
    # `fn.argtype = [...]` assignment was silently ignored by ctypes.
    fn.argtypes = [
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.c_ulong,
    ]
    fn.restype = ctypes.c_ulong
    return fn
|
||||
|
||||
|
||||
def depack_srcsize(data: bytes, expected_depack_size: int) -> bytes:
    """Returns a bytes object of the uncompressed data."""
    depack = _fn_blz_depack_srcsize()
    # Give the output buffer headroom over the expected size; the underlying
    # C routine is unchecked (see _fn_blz_depack_srcsize docs).
    out = ctypes.create_string_buffer(expected_depack_size * 2)

    depacked_size = depack(data, out, len(data))
    if not depacked_size:
        # brieflz signals failure with a zero decompressed size
        raise BriefLZError()
    return bytes(out[:depacked_size])  # type: ignore
|
||||
|
||||
|
||||
class BriefLZError(Exception):
    """Exception raised for brieflz compression or decompression error.

    The previous explicit ``__init__(self)`` was a no-op that also prevented
    callers from attaching a message; relying on the ``Exception`` default
    keeps ``BriefLZError()`` working unchanged while allowing optional
    arguments (backward-compatible generalization).
    """
|
||||
24
Translations/brieflz_test.py
Normal file
24
Translations/brieflz_test.py
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env python3
|
||||
import brieflz
|
||||
import unittest
|
||||
|
||||
|
||||
# Fixed plaintext fixture (Lorem ipsum) exercised by the round-trip test below.
TEST_DATA = (
    b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
    b"Ut consequat mattis orci ac laoreet. Duis ac turpis tempus, varius lacus non, dignissim lectus. "
    b"Curabitur quis metus luctus, sollicitudin ipsum at, dictum metus. "
    b"Cras sed est nec ex tempor tincidunt in at ante. Vivamus laoreet urna eget lectus euismod feugiat. "
    b"Duis a massa ac metus pellentesque interdum. Nunc congue, est faucibus convallis commodo, justo nibh sagittis augue, sed tristique urna neque vitae urna. "
    b"Donec quis orci et purus imperdiet sollicitudin."
)
|
||||
|
||||
|
||||
class TestBriefLZ(unittest.TestCase):
    """Round-trip check of the brieflz ctypes bindings."""

    def test_roundtrip(self):
        compressed = brieflz.compress(TEST_DATA)
        restored = brieflz.depack_srcsize(compressed, len(TEST_DATA))
        self.assertEqual(restored, TEST_DATA)
|
||||
|
||||
|
||||
# Entry point: run the unit tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
||||
@@ -1,91 +0,0 @@
|
||||
import ctypes
|
||||
import functools
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
HERE = Path(__file__).resolve().parent
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _liblzfx():
    """Load (at most once) and return the lzfx shared library handle."""
    lib_path = os.path.join(HERE, "../source/Objects/host/lzfx/liblzfx.so")
    return ctypes.cdll.LoadLibrary(lib_path)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
def _fn_lzfx_compress():
    """Returns the lzfx_compress C function.

    ::

        /* Buffer-to buffer compression.

           Supply pre-allocated input and output buffers via ibuf and obuf, and
           their size in bytes via ilen and olen. Buffers may not overlap.

           On success, the function returns a non-negative value and the argument
           olen contains the compressed size in bytes. On failure, a negative
           value is returned and olen is not modified.
        */
        int lzfx_compress(const void* ibuf, unsigned int ilen,
                          void* obuf, unsigned int *olen);
    """

    fn = _liblzfx().lzfx_compress
    # Fix: the ctypes attribute is `argtypes` (plural); the previous
    # `fn.argtype = [...]` assignment was silently ignored by ctypes.
    fn.argtypes = [
        ctypes.c_char_p,
        ctypes.c_uint,
        ctypes.c_char_p,
        ctypes.POINTER(ctypes.c_uint),
    ]
    fn.restype = ctypes.c_int
    return fn
|
||||
|
||||
|
||||
def compress(data: bytes) -> bytes:
    """Returns a bytes object of the lzfx-compressed data."""
    lzfx_compress = _fn_lzfx_compress()
    # Output buffer is input size + 8 bytes of headroom (bound chosen by the
    # original author — presumably enough for incompressible input; confirm
    # against the lzfx sources).
    buf_len = len(data) + 8
    obuf = ctypes.create_string_buffer(buf_len)
    olen = ctypes.c_uint(buf_len)

    status = lzfx_compress(data, len(data), obuf, ctypes.byref(olen))
    if status < 0:
        # Negative return codes indicate failure; olen is left untouched.
        raise LzfxError(status)
    return bytes(obuf[: olen.value])  # type: ignore
|
||||
|
||||
|
||||
class LzfxError(Exception):
    """Exception raised for lzfx compression or decompression error.

    Attributes:
        error_code -- The source error code, which is a negative integer
        error_name -- The constant name of the error
        message -- explanation of the error
    """

    # define LZFX_ESIZE -1 /* Output buffer too small */
    # define LZFX_ECORRUPT -2 /* Invalid data for decompression */
    # define LZFX_EARGS -3 /* Arguments invalid (NULL) */

    # Lookup table mirroring the C error constants above.
    _KNOWN_ERRORS = {
        -1: ("LZFX_ESIZE", "Output buffer too small"),
        -2: ("LZFX_ECORRUPT", "Invalid data for decompression"),
        -3: ("LZFX_EARGS", "Arguments invalid (NULL)"),
    }

    def __init__(self, error_code):
        self.error_code = error_code
        self.error_name, self.message = self._KNOWN_ERRORS.get(
            error_code, ("UNKNOWN", "Unknown error")
        )
|
||||
@@ -19,7 +19,8 @@ from bdflib import reader as bdfreader
|
||||
from bdflib.model import Font, Glyph
|
||||
|
||||
import font_tables
|
||||
import lzfx
|
||||
import brieflz
|
||||
import objcopy
|
||||
|
||||
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
|
||||
|
||||
@@ -118,7 +119,9 @@ def get_debug_menu() -> List[str]:
|
||||
]
|
||||
|
||||
|
||||
def get_letter_counts(defs: dict, lang: dict, build_version: str) -> List[str]:
|
||||
def get_letter_counts(
|
||||
defs: dict, lang: dict, build_version: str
|
||||
) -> Tuple[List[str], Dict[str, int]]:
|
||||
text_list = []
|
||||
# iterate over all strings
|
||||
obj = lang["menuOptions"]
|
||||
@@ -187,10 +190,12 @@ def get_letter_counts(defs: dict, lang: dict, build_version: str) -> List[str]:
|
||||
symbol_counts[letter] = symbol_counts.get(letter, 0) + 1
|
||||
# swap to Big -> little sort order
|
||||
symbols_by_occurrence = [
|
||||
x[0] for x in sorted(symbol_counts.items(), key=lambda kv: (kv[1], kv[0]))
|
||||
x[0]
|
||||
for x in sorted(
|
||||
symbol_counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
|
||||
)
|
||||
]
|
||||
symbols_by_occurrence.reverse()
|
||||
return symbols_by_occurrence
|
||||
return symbols_by_occurrence, symbol_counts
|
||||
|
||||
|
||||
def get_cjk_glyph(sym: str) -> bytes:
|
||||
@@ -383,20 +388,14 @@ def get_font_map_per_font(text_list: List[str], fonts: List[str]) -> FontMapsPer
|
||||
return FontMapsPerFont(font12_maps, font06_maps, sym_lists)
|
||||
|
||||
|
||||
def get_font_map_and_table(
|
||||
text_list: List[str], fonts: List[str]
|
||||
) -> Tuple[List[str], FontMap, Dict[str, bytes]]:
|
||||
# the text list is sorted
|
||||
# allocate out these in their order as number codes
|
||||
symbol_map: Dict[str, bytes] = {"\n": bytes([1])}
|
||||
index = 2 # start at 2, as 0= null terminator,1 = new line
|
||||
def get_forced_first_symbols() -> List[str]:
|
||||
forced_first_symbols = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
|
||||
return forced_first_symbols
|
||||
|
||||
# We enforce that numbers come first.
|
||||
text_list = forced_first_symbols + [
|
||||
x for x in text_list if x not in forced_first_symbols
|
||||
]
|
||||
|
||||
def get_sym_list_and_font_map(
|
||||
text_list: List[str], fonts: List[str]
|
||||
) -> Tuple[List[str], Dict[str, List[str]], FontMap]:
|
||||
font_maps = get_font_map_per_font(text_list, fonts)
|
||||
font12_maps = font_maps.font12_maps
|
||||
font06_maps = font_maps.font06_maps
|
||||
@@ -420,29 +419,50 @@ def get_font_map_and_table(
|
||||
sym_list_both_fonts.append(sym)
|
||||
sym_list = sym_list_both_fonts + sym_list_large_only
|
||||
|
||||
return sym_list, font_maps.sym_lists, FontMap(font12_map, font06_map)
|
||||
|
||||
|
||||
def build_symbol_conversion_map(sym_list: List[str]) -> Dict[str, bytes]:
|
||||
forced_first_symbols = get_forced_first_symbols()
|
||||
if sym_list[: len(forced_first_symbols)] != forced_first_symbols:
|
||||
raise ValueError("Symbol list does not start with forced_first_symbols.")
|
||||
|
||||
# the text list is sorted
|
||||
# allocate out these in their order as number codes
|
||||
symbol_map: Dict[str, bytes] = {"\n": bytes([1])}
|
||||
index = 2 # start at 2, as 0= null terminator,1 = new line
|
||||
|
||||
# Assign symbol bytes by font index
|
||||
for index, sym in enumerate(sym_list, index):
|
||||
assert sym not in symbol_map
|
||||
symbol_map[sym] = get_bytes_from_font_index(index)
|
||||
|
||||
return sym_list, FontMap(font12_map, font06_map), symbol_map
|
||||
return symbol_map
|
||||
|
||||
|
||||
def make_font_table_cpp(
|
||||
sym_list: List[str], font_map: FontMap, symbol_map: Dict[str, bytes]
|
||||
) -> str:
|
||||
output_table = make_font_table_12_cpp(sym_list, font_map, symbol_map)
|
||||
output_table = make_font_table_named_cpp(
|
||||
"USER_FONT_12", sym_list, font_map.font12, symbol_map
|
||||
)
|
||||
output_table += make_font_table_06_cpp(sym_list, font_map, symbol_map)
|
||||
return output_table
|
||||
|
||||
|
||||
def make_font_table_12_cpp(
|
||||
sym_list: List[str], font_map: FontMap, symbol_map: Dict[str, bytes]
|
||||
def make_font_table_named_cpp(
|
||||
name: Optional[str],
|
||||
sym_list: List[str],
|
||||
font_map: Dict[str, bytes],
|
||||
symbol_map: Dict[str, bytes],
|
||||
) -> str:
|
||||
output_table = "const uint8_t USER_FONT_12[] = {\n"
|
||||
output_table = ""
|
||||
if name:
|
||||
output_table = f"const uint8_t {name}[] = {{\n"
|
||||
for sym in sym_list:
|
||||
output_table += f"{bytes_to_c_hex(font_map.font12[sym])}//{bytes_to_escaped(symbol_map[sym])} -> {sym}\n"
|
||||
output_table += "};\n"
|
||||
output_table += f"{bytes_to_c_hex(font_map[sym])}//{bytes_to_escaped(symbol_map[sym])} -> {sym}\n"
|
||||
if name:
|
||||
output_table += f"}}; // {name}\n"
|
||||
return output_table
|
||||
|
||||
|
||||
@@ -495,26 +515,102 @@ def write_bytes_as_c_array(
|
||||
|
||||
@dataclass
|
||||
class LanguageData:
|
||||
lang: dict
|
||||
langs: List[dict]
|
||||
defs: dict
|
||||
build_version: str
|
||||
sym_list: List[str]
|
||||
sym_lists_by_font: Dict[str, List[str]]
|
||||
font_map: FontMap
|
||||
symbol_conversion_table: Dict[str, bytes]
|
||||
|
||||
|
||||
def prepare_language(lang: dict, defs: dict, build_version: str) -> LanguageData:
|
||||
language_code: str = lang["languageCode"]
|
||||
logging.info(f"Preparing language data for {language_code}")
|
||||
# Iterate over all of the text to build up the symbols & counts
|
||||
text_list = get_letter_counts(defs, lang, build_version)
|
||||
text_list, _ = get_letter_counts(defs, lang, build_version)
|
||||
# From the letter counts, need to make a symbol translator & write out the font
|
||||
fonts = lang["fonts"]
|
||||
sym_list, font_map, symbol_conversion_table = get_font_map_and_table(
|
||||
text_list, fonts
|
||||
)
|
||||
|
||||
forced_first_symbols = get_forced_first_symbols()
|
||||
|
||||
# We enforce that numbers come first.
|
||||
text_list = forced_first_symbols + [
|
||||
x for x in text_list if x not in forced_first_symbols
|
||||
]
|
||||
|
||||
sym_list, sym_lists_by_font, font_map = get_sym_list_and_font_map(text_list, fonts)
|
||||
return LanguageData(
|
||||
lang, defs, build_version, sym_list, font_map, symbol_conversion_table
|
||||
[lang], defs, build_version, sym_list, sym_lists_by_font, font_map
|
||||
)
|
||||
|
||||
|
||||
def prepare_languages(
|
||||
langs: List[dict], defs: dict, build_version: str
|
||||
) -> LanguageData:
|
||||
language_codes: List[str] = [lang["languageCode"] for lang in langs]
|
||||
logging.info(f"Preparing language data for {language_codes}")
|
||||
|
||||
forced_first_symbols = get_forced_first_symbols()
|
||||
|
||||
all_fonts = [
|
||||
font_tables.NAME_ASCII_BASIC,
|
||||
font_tables.NAME_LATIN_EXTENDED,
|
||||
font_tables.NAME_CYRILLIC,
|
||||
font_tables.NAME_CJK,
|
||||
]
|
||||
|
||||
# Build the full font maps
|
||||
font12_map = {}
|
||||
font06_map = {}
|
||||
# Calculate total symbol counts per font:
|
||||
total_sym_counts: Dict[str, Dict[str, int]] = {}
|
||||
for lang in langs:
|
||||
text_list, sym_counts = get_letter_counts(defs, lang, build_version)
|
||||
fonts = lang["fonts"]
|
||||
text_list = forced_first_symbols + [
|
||||
x for x in text_list if x not in forced_first_symbols
|
||||
]
|
||||
font_maps = get_font_map_per_font(text_list, fonts)
|
||||
for font in fonts:
|
||||
font12_map.update(font_maps.font12_maps[font])
|
||||
font06_map.update(font_maps.font06_maps[font])
|
||||
for font, font_sym_list in font_maps.sym_lists.items():
|
||||
font_total_sym_counts = total_sym_counts.get(font, {})
|
||||
for sym in font_sym_list:
|
||||
font_total_sym_counts[sym] = font_total_sym_counts.get(
|
||||
sym, 0
|
||||
) + sym_counts.get(sym, 0)
|
||||
total_sym_counts[font] = font_total_sym_counts
|
||||
|
||||
sym_lists_by_font: Dict[str, List[str]] = {}
|
||||
combined_sym_list = []
|
||||
for font in all_fonts:
|
||||
if font not in total_sym_counts:
|
||||
continue
|
||||
# swap to Big -> little sort order
|
||||
current_sym_list = [
|
||||
x[0]
|
||||
for x in sorted(
|
||||
total_sym_counts[font].items(),
|
||||
key=lambda kv: (kv[1], kv[0]),
|
||||
reverse=True,
|
||||
)
|
||||
]
|
||||
if font == font_tables.NAME_ASCII_BASIC:
|
||||
# We enforce that numbers come first.
|
||||
current_sym_list = forced_first_symbols + [
|
||||
x for x in current_sym_list if x not in forced_first_symbols
|
||||
]
|
||||
sym_lists_by_font[font] = current_sym_list
|
||||
combined_sym_list.extend(current_sym_list)
|
||||
|
||||
return LanguageData(
|
||||
langs,
|
||||
defs,
|
||||
build_version,
|
||||
combined_sym_list,
|
||||
sym_lists_by_font,
|
||||
FontMap(font12_map, font06_map),
|
||||
)
|
||||
|
||||
|
||||
@@ -524,12 +620,15 @@ def write_language(
|
||||
strings_bin: Optional[bytes] = None,
|
||||
compress_font: bool = False,
|
||||
) -> None:
|
||||
lang = data.lang
|
||||
if len(data.langs) > 1:
|
||||
raise ValueError("More than 1 languages are provided")
|
||||
lang = data.langs[0]
|
||||
defs = data.defs
|
||||
build_version = data.build_version
|
||||
sym_list = data.sym_list
|
||||
font_map = data.font_map
|
||||
symbol_conversion_table = data.symbol_conversion_table
|
||||
|
||||
symbol_conversion_table = build_symbol_conversion_map(sym_list)
|
||||
|
||||
language_code: str = lang["languageCode"]
|
||||
logging.info(f"Generating block for {language_code}")
|
||||
@@ -540,7 +639,7 @@ def write_language(
|
||||
lang_name = language_code
|
||||
|
||||
if strings_bin or compress_font:
|
||||
f.write('#include "lzfx.h"\n')
|
||||
f.write('#include "brieflz.h"\n')
|
||||
|
||||
f.write(f"\n// ---- {lang_name} ----\n\n")
|
||||
|
||||
@@ -549,19 +648,44 @@ def write_language(
|
||||
sym_list, font_map, symbol_conversion_table
|
||||
)
|
||||
f.write(font_table_text)
|
||||
f.write(
|
||||
"const FontSection FontSectionsData[] = {\n"
|
||||
" {\n"
|
||||
" .symbol_start = 2,\n"
|
||||
f" .symbol_end = {len(sym_list) + 2},\n"
|
||||
" .font12_start_ptr = USER_FONT_12,\n"
|
||||
" .font06_start_ptr = USER_FONT_6x8,\n"
|
||||
" },\n"
|
||||
"};\n"
|
||||
"const FontSection *const FontSections = FontSectionsData;\n"
|
||||
"const uint8_t FontSectionsCount = sizeof(FontSectionsData) / sizeof(FontSectionsData[0]);\n"
|
||||
)
|
||||
else:
|
||||
font12_uncompressed = bytearray()
|
||||
for sym in sym_list:
|
||||
font12_uncompressed.extend(font_map.font12[sym])
|
||||
font12_compressed = lzfx.compress(bytes(font12_uncompressed))
|
||||
font12_compressed = brieflz.compress(bytes(font12_uncompressed))
|
||||
logging.info(
|
||||
f"Font table 12x16 compressed from {len(font12_uncompressed)} to {len(font12_compressed)} bytes (ratio {len(font12_compressed) / len(font12_uncompressed):.3})"
|
||||
)
|
||||
write_bytes_as_c_array(f, "font_12x16_lzfx", font12_compressed)
|
||||
write_bytes_as_c_array(f, "font_12x16_brieflz", font12_compressed)
|
||||
font_table_text = make_font_table_06_cpp(
|
||||
sym_list, font_map, symbol_conversion_table
|
||||
)
|
||||
f.write(font_table_text)
|
||||
f.write(
|
||||
f"static uint8_t font_out_buffer[{len(font12_uncompressed)}];\n"
|
||||
"const FontSection FontSectionsData[] = {\n"
|
||||
" {\n"
|
||||
" .symbol_start = 2,\n"
|
||||
f" .symbol_end = {len(sym_list) + 2},\n"
|
||||
" .font12_start_ptr = font_out_buffer,\n"
|
||||
" .font06_start_ptr = USER_FONT_6x8,\n"
|
||||
" },\n"
|
||||
"};\n"
|
||||
"const FontSection *const FontSections = FontSectionsData;\n"
|
||||
"const uint8_t FontSectionsCount = sizeof(FontSectionsData) / sizeof(FontSectionsData[0]);\n"
|
||||
)
|
||||
|
||||
f.write(f"\n// ---- {lang_name} ----\n\n")
|
||||
|
||||
@@ -573,49 +697,38 @@ def write_language(
|
||||
f"const bool HasFahrenheit = {('true' if lang.get('tempUnitFahrenheit', True) else 'false')};\n\n"
|
||||
)
|
||||
|
||||
if not compress_font:
|
||||
f.write("extern const uint8_t *const Font_12x16 = USER_FONT_12;\n")
|
||||
else:
|
||||
f.write(
|
||||
f"static uint8_t font_out_buffer[{len(font12_uncompressed)}];\n\n"
|
||||
"extern const uint8_t *const Font_12x16 = font_out_buffer;\n"
|
||||
)
|
||||
f.write("extern const uint8_t *const Font_6x8 = USER_FONT_6x8;\n\n")
|
||||
|
||||
if not strings_bin:
|
||||
translation_strings_and_indices_text = get_translation_strings_and_indices_text(
|
||||
lang, defs, symbol_conversion_table
|
||||
)
|
||||
f.write(translation_strings_and_indices_text)
|
||||
f.write(
|
||||
"const TranslationIndexTable *const Tr = &TranslationIndices;\n"
|
||||
"const char *const TranslationStrings = TranslationStringsData;\n\n"
|
||||
"const TranslationIndexTable *Tr = &translation.indices;\n"
|
||||
"const char *TranslationStrings = translation.strings;\n\n"
|
||||
)
|
||||
else:
|
||||
compressed = lzfx.compress(strings_bin)
|
||||
compressed = brieflz.compress(strings_bin)
|
||||
logging.info(
|
||||
f"Strings compressed from {len(strings_bin)} to {len(compressed)} bytes (ratio {len(compressed) / len(strings_bin):.3})"
|
||||
)
|
||||
write_bytes_as_c_array(f, "translation_data_lzfx", compressed)
|
||||
write_bytes_as_c_array(f, "translation_data_brieflz", compressed)
|
||||
f.write(
|
||||
f"static uint8_t translation_data_out_buffer[{len(strings_bin)}] __attribute__((__aligned__(2)));\n\n"
|
||||
"const TranslationIndexTable *const Tr = reinterpret_cast<const TranslationIndexTable *>(translation_data_out_buffer);\n"
|
||||
"const char *const TranslationStrings = reinterpret_cast<const char *>(translation_data_out_buffer) + sizeof(TranslationIndexTable);\n\n"
|
||||
"const TranslationIndexTable *Tr = reinterpret_cast<const TranslationIndexTable *>(translation_data_out_buffer);\n"
|
||||
"const char *TranslationStrings = reinterpret_cast<const char *>(translation_data_out_buffer) + sizeof(TranslationIndexTable);\n\n"
|
||||
)
|
||||
|
||||
if not strings_bin and not compress_font:
|
||||
f.write("void prepareTranslations() {}\n\n")
|
||||
else:
|
||||
f.write("void prepareTranslations() {\n" " unsigned int outsize;\n")
|
||||
f.write("void prepareTranslations() {\n")
|
||||
if compress_font:
|
||||
f.write(
|
||||
" outsize = sizeof(font_out_buffer);\n"
|
||||
" lzfx_decompress(font_12x16_lzfx, sizeof(font_12x16_lzfx), font_out_buffer, &outsize);\n"
|
||||
" blz_depack_srcsize(font_12x16_brieflz, font_out_buffer, sizeof(font_12x16_brieflz));\n"
|
||||
)
|
||||
if strings_bin:
|
||||
f.write(
|
||||
" outsize = sizeof(translation_data_out_buffer);\n"
|
||||
" lzfx_decompress(translation_data_lzfx, sizeof(translation_data_lzfx), translation_data_out_buffer, &outsize);\n"
|
||||
" blz_depack_srcsize(translation_data_brieflz, translation_data_out_buffer, sizeof(translation_data_brieflz));\n"
|
||||
)
|
||||
f.write("}\n\n")
|
||||
|
||||
@@ -623,6 +736,188 @@ def write_language(
|
||||
f.write(sanity_checks_text)
|
||||
|
||||
|
||||
def write_languages(
|
||||
data: LanguageData,
|
||||
f: TextIO,
|
||||
strings_obj_path: Optional[str] = None,
|
||||
compress_font: bool = False,
|
||||
) -> None:
|
||||
defs = data.defs
|
||||
build_version = data.build_version
|
||||
combined_sym_list = data.sym_list
|
||||
sym_lists_by_font = data.sym_lists_by_font
|
||||
font_map = data.font_map
|
||||
|
||||
symbol_conversion_table = build_symbol_conversion_map(combined_sym_list)
|
||||
|
||||
language_codes: List[str] = [lang["languageCode"] for lang in data.langs]
|
||||
logging.info(f"Generating block for {language_codes}")
|
||||
|
||||
lang_names = [
|
||||
lang.get("languageLocalName", lang["languageCode"]) for lang in data.langs
|
||||
]
|
||||
|
||||
f.write('#include "Translation_multi.h"')
|
||||
|
||||
f.write(f"\n// ---- {lang_names} ----\n\n")
|
||||
|
||||
max_decompressed_font_size = 0
|
||||
if not compress_font:
|
||||
font_table_text = ""
|
||||
font_section_info_text = (
|
||||
"const FontSectionDataInfo FontSectionDataInfos[] = {\n"
|
||||
)
|
||||
for font, current_sym_list in sym_lists_by_font.items():
|
||||
font_table_text += f"const uint8_t font_table_data_{font}[] = {{\n"
|
||||
font_table_text += "// 12x16:\n"
|
||||
font_table_text += make_font_table_named_cpp(
|
||||
None,
|
||||
current_sym_list,
|
||||
font_map.font12,
|
||||
symbol_conversion_table,
|
||||
)
|
||||
if font != font_tables.NAME_CJK:
|
||||
font_table_text += "// 6x8:\n"
|
||||
font_table_text += make_font_table_named_cpp(
|
||||
None,
|
||||
current_sym_list,
|
||||
font_map.font06, # type: ignore[arg-type]
|
||||
symbol_conversion_table,
|
||||
)
|
||||
font_table_text += f"}}; // font_table_data_{font}\n"
|
||||
current_sym_start = combined_sym_list.index(current_sym_list[0]) + 2
|
||||
font_section_info_text += (
|
||||
" {\n"
|
||||
f" .symbol_start = {current_sym_start},\n"
|
||||
f" .symbol_count = {len(current_sym_list)},\n"
|
||||
f" .data_size = sizeof(font_table_data_{font}),\n"
|
||||
" .data_is_compressed = false,\n"
|
||||
f" .data_ptr = font_table_data_{font},\n"
|
||||
" },\n"
|
||||
)
|
||||
|
||||
f.write(font_table_text)
|
||||
font_section_info_text += (
|
||||
"};\n"
|
||||
"const uint8_t FontSectionDataCount = sizeof(FontSectionDataInfos) / sizeof(FontSectionDataInfos[0]);\n\n"
|
||||
)
|
||||
f.write(font_section_info_text)
|
||||
f.write(
|
||||
"FontSection DynamicFontSections[4] = {};\n"
|
||||
"const FontSection *const FontSections = DynamicFontSections;\n"
|
||||
"const uint8_t FontSectionsCount = sizeof(DynamicFontSections) / sizeof(DynamicFontSections[0]);\n"
|
||||
)
|
||||
else:
|
||||
font_section_info_text = (
|
||||
"const FontSectionDataInfo FontSectionDataInfos[] = {\n"
|
||||
)
|
||||
for font, current_sym_list in sym_lists_by_font.items():
|
||||
current_sym_start = combined_sym_list.index(current_sym_list[0]) + 2
|
||||
font_uncompressed = bytearray()
|
||||
for sym in current_sym_list:
|
||||
font_uncompressed.extend(font_map.font12[sym])
|
||||
if font != font_tables.NAME_CJK:
|
||||
for sym in current_sym_list:
|
||||
font_uncompressed.extend(font_map.font06[sym]) # type: ignore[arg-type]
|
||||
font_compressed = brieflz.compress(bytes(font_uncompressed))
|
||||
logging.info(
|
||||
f"Font table for {font} compressed from {len(font_uncompressed)} to {len(font_compressed)} bytes (ratio {len(font_compressed) / len(font_uncompressed):.3})"
|
||||
)
|
||||
max_decompressed_font_size += len(font_uncompressed)
|
||||
write_bytes_as_c_array(f, f"font_data_brieflz_{font}", font_compressed)
|
||||
font_section_info_text += (
|
||||
" {\n"
|
||||
f" .symbol_start = {current_sym_start},\n"
|
||||
f" .symbol_count = {len(current_sym_list)},\n"
|
||||
f" .data_size = sizeof(font_data_brieflz_{font}),\n"
|
||||
" .data_is_compressed = true,\n"
|
||||
f" .data_ptr = font_data_brieflz_{font},\n"
|
||||
" },\n"
|
||||
)
|
||||
font_section_info_text += (
|
||||
"};\n"
|
||||
"const uint8_t FontSectionDataCount = sizeof(FontSectionDataInfos) / sizeof(FontSectionDataInfos[0]);\n\n"
|
||||
)
|
||||
f.write(font_section_info_text)
|
||||
f.write(
|
||||
"FontSection DynamicFontSections[4] = {};\n"
|
||||
"const FontSection *const FontSections = DynamicFontSections;\n"
|
||||
"const uint8_t FontSectionsCount = sizeof(DynamicFontSections) / sizeof(DynamicFontSections[0]);\n"
|
||||
)
|
||||
|
||||
f.write(f"\n// ---- {lang_names} ----\n\n")
|
||||
|
||||
translation_common_text = get_translation_common_text(
|
||||
defs, symbol_conversion_table, build_version
|
||||
)
|
||||
f.write(translation_common_text)
|
||||
f.write(
|
||||
f"const bool HasFahrenheit = {('true' if any([lang.get('tempUnitFahrenheit', True) for lang in data.langs]) else 'false')};\n\n"
|
||||
)
|
||||
|
||||
max_decompressed_translation_size = 0
|
||||
if not strings_obj_path:
|
||||
for lang in data.langs:
|
||||
lang_code = lang["languageCode"]
|
||||
translation_strings_and_indices_text = (
|
||||
get_translation_strings_and_indices_text(
|
||||
lang, defs, symbol_conversion_table, suffix=f"_{lang_code}"
|
||||
)
|
||||
)
|
||||
f.write(translation_strings_and_indices_text)
|
||||
f.write("const LanguageMeta LanguageMetas[] = {\n")
|
||||
for lang in data.langs:
|
||||
lang_code = lang["languageCode"]
|
||||
f.write(
|
||||
" {\n"
|
||||
# NOTE: Cannot specify C99 designator here due to GCC (g++) bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55227
|
||||
f' /* .code = */ "{lang_code}",\n'
|
||||
f" .translation_data = reinterpret_cast<const uint8_t *>(&translation_{lang_code}),\n"
|
||||
f" .translation_size = sizeof(translation_{lang_code}),\n"
|
||||
f" .translation_is_compressed = false,\n"
|
||||
" },\n"
|
||||
)
|
||||
f.write("};\n")
|
||||
else:
|
||||
for lang in data.langs:
|
||||
lang_code = lang["languageCode"]
|
||||
sym_name = objcopy.cpp_var_to_section_name(f"translation_{lang_code}")
|
||||
strings_bin = objcopy.get_binary_from_obj(strings_obj_path, sym_name)
|
||||
if len(strings_bin) == 0:
|
||||
raise ValueError(f"Output for {sym_name} is empty")
|
||||
max_decompressed_translation_size = max(
|
||||
max_decompressed_translation_size, len(strings_bin)
|
||||
)
|
||||
compressed = brieflz.compress(strings_bin)
|
||||
logging.info(
|
||||
f"Strings for {lang_code} compressed from {len(strings_bin)} to {len(compressed)} bytes (ratio {len(compressed) / len(strings_bin):.3})"
|
||||
)
|
||||
write_bytes_as_c_array(
|
||||
f, f"translation_data_brieflz_{lang_code}", compressed
|
||||
)
|
||||
f.write("const LanguageMeta LanguageMetas[] = {\n")
|
||||
for lang in data.langs:
|
||||
lang_code = lang["languageCode"]
|
||||
f.write(
|
||||
" {\n"
|
||||
# NOTE: Cannot specify C99 designator here due to GCC (g++) bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55227
|
||||
f' /* .code = */ "{lang_code}",\n'
|
||||
f" .translation_data = translation_data_brieflz_{lang_code},\n"
|
||||
f" .translation_size = sizeof(translation_data_brieflz_{lang_code}),\n"
|
||||
f" .translation_is_compressed = true,\n"
|
||||
" },\n"
|
||||
)
|
||||
f.write("};\n")
|
||||
f.write(
|
||||
"const uint8_t LanguageCount = sizeof(LanguageMetas) / sizeof(LanguageMetas[0]);\n\n"
|
||||
f"alignas(TranslationData) uint8_t translation_data_out_buffer[{max_decompressed_translation_size + max_decompressed_font_size}];\n"
|
||||
"const uint16_t translation_data_out_buffer_size = sizeof(translation_data_out_buffer);\n\n"
|
||||
)
|
||||
|
||||
sanity_checks_text = get_translation_sanity_checks_text(defs)
|
||||
f.write(sanity_checks_text)
|
||||
|
||||
|
||||
def get_translation_common_text(
|
||||
defs: dict, symbol_conversion_table: Dict[str, bytes], build_version
|
||||
) -> str:
|
||||
@@ -652,7 +947,7 @@ class TranslationItem:
|
||||
|
||||
|
||||
def get_translation_strings_and_indices_text(
|
||||
lang: dict, defs: dict, symbol_conversion_table: Dict[str, bytes]
|
||||
lang: dict, defs: dict, symbol_conversion_table: Dict[str, bytes], suffix: str = ""
|
||||
) -> str:
|
||||
str_table: List[str] = []
|
||||
str_group_messages: List[TranslationItem] = []
|
||||
@@ -780,6 +1075,8 @@ def get_translation_strings_and_indices_text(
|
||||
j = i
|
||||
while backward_sorted_table[j + 1][2].startswith(converted):
|
||||
j += 1
|
||||
if j + 1 == len(backward_sorted_table):
|
||||
break
|
||||
if j != i:
|
||||
str_remapping[str_index] = RemappedTranslationItem(
|
||||
str_index=backward_sorted_table[j][0],
|
||||
@@ -790,7 +1087,8 @@ def get_translation_strings_and_indices_text(
|
||||
str_offsets = [-1] * len(str_table)
|
||||
offset = 0
|
||||
write_null = False
|
||||
translation_strings_text = "const char TranslationStringsData[] = {\n"
|
||||
# NOTE: Cannot specify C99 designator here due to GCC (g++) bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55227
|
||||
translation_strings_text = " /* .strings = */ {\n"
|
||||
for i, source_str in enumerate(str_table):
|
||||
if str_remapping[i] is not None:
|
||||
continue
|
||||
@@ -814,33 +1112,37 @@ def get_translation_strings_and_indices_text(
|
||||
for item in group:
|
||||
if item.str_index == j:
|
||||
translation_strings_text += (
|
||||
f" // - {pre_info} {item.info}\n"
|
||||
f" // - {pre_info} {item.info}\n"
|
||||
)
|
||||
if j == i:
|
||||
translation_strings_text += f" // {offset: >4}: {escape(source_str)}\n"
|
||||
translation_strings_text += (
|
||||
f" // {offset: >4}: {escape(source_str)}\n"
|
||||
)
|
||||
str_offsets[j] = offset
|
||||
else:
|
||||
remapped = str_remapping[j]
|
||||
assert remapped is not None
|
||||
translation_strings_text += f" // {offset + remapped.str_start_offset: >4}: {escape(str_table[j])}\n"
|
||||
translation_strings_text += f" // {offset + remapped.str_start_offset: >4}: {escape(str_table[j])}\n"
|
||||
str_offsets[j] = offset + remapped.str_start_offset
|
||||
converted_bytes = convert_string_bytes(symbol_conversion_table, source_str)
|
||||
translation_strings_text += f' "{bytes_to_escaped(converted_bytes)}"'
|
||||
translation_strings_text += f' "{bytes_to_escaped(converted_bytes)}"'
|
||||
str_offsets[i] = offset
|
||||
# Add the length and the null terminator
|
||||
offset += len(converted_bytes) + 1
|
||||
translation_strings_text += "\n}; // TranslationStringsData\n\n"
|
||||
translation_strings_text += "\n }, // .strings\n\n"
|
||||
|
||||
str_total_bytes = offset
|
||||
|
||||
def get_offset(idx: int) -> int:
|
||||
assert str_offsets[idx] >= 0
|
||||
return str_offsets[idx]
|
||||
|
||||
translation_indices_text = "const TranslationIndexTable TranslationIndices = {\n"
|
||||
translation_indices_text = " .indices = {\n"
|
||||
|
||||
# ----- Write the messages string indices:
|
||||
for group in [str_group_messages, str_group_messageswarn, str_group_characters]:
|
||||
for item in group:
|
||||
translation_indices_text += f" .{item.info} = {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
|
||||
translation_indices_text += f" .{item.info} = {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
|
||||
translation_indices_text += "\n"
|
||||
|
||||
# ----- Write the settings index tables:
|
||||
@@ -851,14 +1153,22 @@ def get_translation_strings_and_indices_text(
|
||||
(str_group_settingmenuentriesdesc, "SettingsMenuEntriesDescriptions"),
|
||||
]:
|
||||
max_len = 30
|
||||
translation_indices_text += f" .{name} = {{\n"
|
||||
translation_indices_text += f" .{name} = {{\n"
|
||||
for item in group:
|
||||
translation_indices_text += f" /* {item.info.ljust(max_len)[:max_len]} */ {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
|
||||
translation_indices_text += f" }}, // {name}\n\n"
|
||||
translation_indices_text += f" /* {item.info.ljust(max_len)[:max_len]} */ {get_offset(item.str_index)}, // {escape(str_table[item.str_index])}\n"
|
||||
translation_indices_text += f" }}, // {name}\n\n"
|
||||
|
||||
translation_indices_text += "}; // TranslationIndices\n\n"
|
||||
translation_indices_text += " }, // .indices\n\n"
|
||||
|
||||
return translation_strings_text + translation_indices_text
|
||||
return (
|
||||
"struct {\n"
|
||||
" TranslationIndexTable indices;\n"
|
||||
f" char strings[{str_total_bytes}];\n"
|
||||
f"}} const translation{suffix} = {{\n"
|
||||
+ translation_indices_text
|
||||
+ translation_strings_text
|
||||
+ f"}}; // translation{suffix}\n\n"
|
||||
)
|
||||
|
||||
|
||||
def get_translation_sanity_checks_text(defs: dict) -> str:
|
||||
@@ -904,11 +1214,11 @@ def parse_args() -> argparse.Namespace:
|
||||
dest="input_pickled",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--strings-bin",
|
||||
help="Use generated TranslationIndices + TranslationStrings data and compress them",
|
||||
"--strings-obj",
|
||||
help="Use generated TranslationData by extracting from object file",
|
||||
type=argparse.FileType("rb"),
|
||||
required=False,
|
||||
dest="strings_bin",
|
||||
dest="strings_obj",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--compress-font",
|
||||
@@ -920,7 +1230,12 @@ def parse_args() -> argparse.Namespace:
|
||||
parser.add_argument(
|
||||
"--output", "-o", help="Target file", type=argparse.FileType("w"), required=True
|
||||
)
|
||||
parser.add_argument("languageCode", help="Language to generate")
|
||||
parser.add_argument(
|
||||
"languageCodes",
|
||||
metavar="languageCode",
|
||||
nargs="+",
|
||||
help="Language(s) to generate",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
@@ -936,12 +1251,13 @@ def main() -> None:
|
||||
if args.input_pickled:
|
||||
logging.info(f"Reading pickled language data from {args.input_pickled.name}...")
|
||||
language_data = pickle.load(args.input_pickled)
|
||||
if language_data.lang["languageCode"] != args.languageCode:
|
||||
language_codes = [lang["languageCode"] for lang in language_data.langs]
|
||||
if language_codes != args.languageCodes:
|
||||
logging.error(
|
||||
f"error: languageCode {args.languageCode} does not match language data {language_data.lang['languageCode']}"
|
||||
f"error: languageCode {args.languageCode} does not match language data {language_codes}"
|
||||
)
|
||||
sys.exit(1)
|
||||
logging.info(f"Read language data for {language_data.lang['languageCode']}")
|
||||
logging.info(f"Read language data for {language_codes}")
|
||||
logging.info(f"Build version: {language_data.build_version}")
|
||||
else:
|
||||
try:
|
||||
@@ -951,23 +1267,45 @@ def main() -> None:
|
||||
sys.exit(1)
|
||||
|
||||
logging.info(f"Build version: {build_version}")
|
||||
logging.info(f"Making {args.languageCode} from {json_dir}")
|
||||
logging.info(f"Making {args.languageCodes} from {json_dir}")
|
||||
|
||||
lang_ = read_translation(json_dir, args.languageCode)
|
||||
defs_ = load_json(os.path.join(json_dir, "translations_def.js"), True)
|
||||
language_data = prepare_language(lang_, defs_, build_version)
|
||||
if len(args.languageCodes) == 1:
|
||||
lang_ = read_translation(json_dir, args.languageCodes[0])
|
||||
language_data = prepare_language(lang_, defs_, build_version)
|
||||
else:
|
||||
langs_ = [
|
||||
read_translation(json_dir, lang_code)
|
||||
for lang_code in args.languageCodes
|
||||
]
|
||||
language_data = prepare_languages(langs_, defs_, build_version)
|
||||
|
||||
out_ = args.output
|
||||
write_start(out_)
|
||||
if args.strings_bin:
|
||||
write_language(
|
||||
language_data,
|
||||
out_,
|
||||
args.strings_bin.read(),
|
||||
compress_font=args.compress_font,
|
||||
)
|
||||
if len(language_data.langs) == 1:
|
||||
if args.strings_obj:
|
||||
sym_name = objcopy.cpp_var_to_section_name("translation")
|
||||
strings_bin = objcopy.get_binary_from_obj(args.strings_obj.name, sym_name)
|
||||
if len(strings_bin) == 0:
|
||||
raise ValueError(f"Output for {sym_name} is empty")
|
||||
write_language(
|
||||
language_data,
|
||||
out_,
|
||||
strings_bin=strings_bin,
|
||||
compress_font=args.compress_font,
|
||||
)
|
||||
else:
|
||||
write_language(language_data, out_, compress_font=args.compress_font)
|
||||
else:
|
||||
write_language(language_data, out_, compress_font=args.compress_font)
|
||||
if args.strings_obj:
|
||||
write_languages(
|
||||
language_data,
|
||||
out_,
|
||||
strings_obj_path=args.strings_obj.name,
|
||||
compress_font=args.compress_font,
|
||||
)
|
||||
else:
|
||||
write_languages(language_data, out_, compress_font=args.compress_font)
|
||||
|
||||
if args.output_pickled:
|
||||
logging.info(f"Writing pickled data to {args.output_pickled.name}")
|
||||
|
||||
25
Translations/objcopy.py
Normal file
25
Translations/objcopy.py
Normal file
@@ -0,0 +1,25 @@
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
|
||||
if "OBJCOPY" in os.environ:
|
||||
OBJCOPY = os.environ["OBJCOPY"]
|
||||
else:
|
||||
OBJCOPY = "objcopy"
|
||||
|
||||
|
||||
def get_binary_from_obj(objfile_path: str, section_name: str) -> bytes:
|
||||
tmpfd, tmpfile = tempfile.mkstemp()
|
||||
result = subprocess.run(
|
||||
[OBJCOPY, "-O", "binary", "-j", section_name, objfile_path, tmpfile]
|
||||
)
|
||||
result.check_returncode()
|
||||
with open(tmpfd, "rb") as f:
|
||||
bin: bytes = f.read()
|
||||
os.remove(tmpfile)
|
||||
return bin
|
||||
|
||||
|
||||
def cpp_var_to_section_name(var_name: str) -> str:
|
||||
return f".rodata._ZL{len(var_name)}{var_name}"
|
||||
@@ -308,6 +308,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" BG Български"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" CS Český"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" DA Dansk"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +306,13 @@
|
||||
"Dauer"
|
||||
],
|
||||
"desc": "Dauer des Wachhalteimpulses (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" DE Deutsch"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" EN English"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +306,13 @@
|
||||
"duración"
|
||||
],
|
||||
"desc": "Duración del impulso de mantenimiento de la vigilia (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" ES Castellano"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -302,6 +302,13 @@
|
||||
"kesto"
|
||||
],
|
||||
"desc": "Herätyspulssin kesto (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" FI Suomi"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"impulsions"
|
||||
],
|
||||
"desc": "Durée des impulsions pour empêcher la mise en veille (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" FR Français"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" HR Hrvatski"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,6 +308,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" HU Magyar"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"impulso"
|
||||
],
|
||||
"desc": "Regola la durata dell'«impulso sveglia» [multipli di 250 ms]"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" IT Italiano"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -201,6 +201,10 @@
|
||||
"PowerPulseDuration": {
|
||||
"text2": "パルス時間長",
|
||||
"desc": "電源供給元をオンに保つために使用される、電力パルスの時間長 <x250ms(ミリ秒)>"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": "言語: 日本語",
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -308,6 +308,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" LT Lietuvių"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,6 +314,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" NL Nederlands"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" NL_BE Vlaams"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" NO Norsk"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +306,13 @@
|
||||
"impulsu mocy"
|
||||
],
|
||||
"desc": "Długość impulsu mocy zapobiegającego usypianiu powerbanku (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" PL Polski"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" PT Português"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +306,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" RU Русский"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" SK Slovenčina"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" SL Slovenščina"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" SR Српски"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" SR Srpski"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,6 +305,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" SV Svenska"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -329,6 +329,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" TR Türkçe"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -306,6 +306,13 @@
|
||||
"duration"
|
||||
],
|
||||
"desc": "Keep-awake-pulse duration (x 250ms)"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": [
|
||||
"Language:",
|
||||
" UK Українська"
|
||||
],
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -201,6 +201,10 @@
|
||||
"PowerPulseDuration": {
|
||||
"text2": "電源脈衝時長",
|
||||
"desc": "為保持電源喚醒,每次通電脈衝嘅時間長度 <x250ms(亳秒)>"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": "語言: 廣東話",
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -201,6 +201,10 @@
|
||||
"PowerPulseDuration": {
|
||||
"text2": "电源脉冲时长",
|
||||
"desc": "为保持电源唤醒,每次通电脉冲的时间长度 <x250ms(亳秒)>"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": "语言:简体中文",
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -201,6 +201,10 @@
|
||||
"PowerPulseDuration": {
|
||||
"text2": "電源脈衝時長",
|
||||
"desc": "為保持電源喚醒,每次通電脈衝的時間長度 <x250ms(亳秒)>"
|
||||
},
|
||||
"LanguageSwitch": {
|
||||
"text2": "語言:正體中文",
|
||||
"desc": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -339,6 +339,11 @@ var def =
|
||||
"id": "PowerPulseDuration",
|
||||
"maxLen": 6,
|
||||
"maxLen2": 13
|
||||
},
|
||||
{
|
||||
"id": "LanguageSwitch",
|
||||
"maxLen": 7,
|
||||
"maxLen2": 15
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user