Skip to content

Commit

Permalink
More fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
insolor committed Jun 1, 2024
1 parent 5325b56 commit 1f86173
Show file tree
Hide file tree
Showing 18 changed files with 74 additions and 41 deletions.
2 changes: 2 additions & 0 deletions df_translation_toolkit/convert/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
# Importing the submodule binds it as an attribute of this package,
# which makes the ``cli`` name below resolvable for re-export.
import df_translation_toolkit.convert.cli

__all__ = ["cli"]
4 changes: 2 additions & 2 deletions df_translation_toolkit/convert/hardcoded_po_to_csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def prepare_dictionary(dictionary: Iterable[tuple[str, str]]) -> Iterable[tuple[
yield original_string, cleanup_string(translation)


def convert(po_file: TextIO, csv_file: TextIO):
def convert(po_file: TextIO, csv_file: TextIO) -> None:
dictionary = simple_read_po(po_file)
csv_writer = csv_utils.writer(csv_file)

Expand All @@ -29,7 +29,7 @@ def convert(po_file: TextIO, csv_file: TextIO):


@app.command()
def main(po_file: Path, csv_file: Path, encoding: str):
def main(po_file: Path, csv_file: Path, encoding: str) -> None:
"""
Convert a po file into a csv file in a specified encoding
"""
Expand Down
6 changes: 4 additions & 2 deletions df_translation_toolkit/convert/objects_po_to_csv.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from collections import defaultdict
from collections.abc import Iterable
from collections.abc import Iterable, Iterator
from pathlib import Path
from typing import TextIO

Expand All @@ -15,7 +15,9 @@
from df_translation_toolkit.validation.validation_models import ValidationException, ValidationProblem


def get_translations_from_tag_parts(original_parts: list[str], translation_parts: list[str]):
def get_translations_from_tag_parts(
original_parts: list[str], translation_parts: list[str]
) -> Iterator[tuple[str, str]]:
tag_translations = defaultdict(list)

prev_original = None
Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/create_mod/batch.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def main(vanilla_path: Path, destination_path: Path, encoding: str, languages: l
try:
fetch_po_from_git(language, destination_path)
except HTTPError as e:
raise Exception(f"Unable to download po file for language {language}. Error: {e.code}, {e.reason}")
raise Exception(f"Unable to download po file for language {language}. Error: {e.code}, {e.reason}") from e
Path.mkdir(destination_path / language.lower(), parents=True, exist_ok=True)
template_from_vanilla(vanilla_path, destination_path / language.lower())
from_template(destination_path / language.lower(), destination_path, language, encoding)
Expand Down
18 changes: 14 additions & 4 deletions df_translation_toolkit/create_mod/from_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,11 +54,18 @@ def localize_directory(
with backup(file_path) as bak_name:
if object_type == "TEXT_SET":
yield from translate_plain_text_file(
bak_name, file_path, dictionaries.dictionary_textset, destination_encoding, False,
bak_name,
file_path,
dictionaries.dictionary_textset,
destination_encoding,
False,
)
else:
yield from translate_single_raw_file(
bak_name, file_path, dictionaries.dictionary_object, destination_encoding,
bak_name,
file_path,
dictionaries.dictionary_object,
destination_encoding,
)


Expand Down Expand Up @@ -116,8 +123,11 @@ def main(
destination_encoding: str,
source_encoding: str = "cp437",
) -> None:
assert template_path.exists(), "Source path doesn't exist"
assert translation_path.exists(), "Translation path doesn't exist"
if not template_path.exists():
raise ValueError("Source path doesn't exist")

if not translation_path.exists():
raise ValueError("Translation path doesn't exist")

dictionaries = get_dictionaries(translation_path, language)

Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/create_pot/from_string_dump.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from pathlib import Path
from pathlib import Path

import typer

Expand Down
42 changes: 29 additions & 13 deletions df_translation_toolkit/parse/parse_plain_text.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from collections.abc import Iterable
from collections.abc import Iterable, Iterator
from typing import NamedTuple


def skip_tags(s):
def skip_tags(s: str) -> Iterator[str]:
opened = 0
for char in s:
if char == "[":
Expand All @@ -14,13 +14,17 @@ def skip_tags(s):


class PlainTextFileToken(NamedTuple):
    """A single token produced while parsing a plain-text DF file."""

    # Token text: a single line, or a paragraph joined with newlines.
    # (The commit renames the field from the typo ``test`` to ``text``;
    # only the corrected field belongs in the final class.)
    text: str
    # True when the text should be offered for translation.
    is_translatable: bool
    # Line number (1-based by convention of the parser) where the token starts.
    line_number: int


def parse_plain_text_file(lines: Iterable[str], join_paragraphs=True, start_line=1) -> Iterable[PlainTextFileToken]:
def local_is_translatable(s):
def parse_plain_text_file(
lines: Iterable[str],
join_paragraphs: bool = True,
start_line: int = 1,
) -> Iterable[PlainTextFileToken]:
def local_is_translatable(s: str) -> bool:
return any(char.islower() for char in skip_tags(s))

lines = iter(lines)
Expand All @@ -31,7 +35,7 @@ def local_is_translatable(s):
# so the first line must be skipped before the text is fed to the function
if join_paragraphs:
# The first line contains file name, skip it
yield PlainTextFileToken(next(lines), False, start_line)
yield PlainTextFileToken(text=next(lines), is_translatable=False, line_number=start_line)
start_line += 1

paragraph_start_line = start_line
Expand All @@ -41,29 +45,41 @@ def local_is_translatable(s):
if local_is_translatable(line):
if line.startswith("[") and not (paragraph and paragraph[-1][-1].isalpha()):
if paragraph:
yield PlainTextFileToken(join_paragraph(paragraph), True, paragraph_start_line)
yield PlainTextFileToken(
text=join_paragraph(paragraph),
is_translatable=True,
line_number=paragraph_start_line,
)
paragraph = []
paragraph_start_line = line_number

if line.rstrip().endswith("]"):
yield PlainTextFileToken(line, True, line_number)
yield PlainTextFileToken(text=line, is_translatable=True, line_number=line_number)
else:
paragraph.append(line)
else:
paragraph.append(line)
else:
if paragraph:
yield PlainTextFileToken(join_paragraph(paragraph), True, paragraph_start_line)
yield PlainTextFileToken(
text=join_paragraph(paragraph),
is_translatable=True,
line_number=paragraph_start_line,
)
paragraph = []
paragraph_start_line = line_number

yield PlainTextFileToken(line, False, line_number) # Not translatable line
yield PlainTextFileToken(
text=line,
is_translatable=False,
line_number=line_number,
) # Not translatable line
else:
yield PlainTextFileToken(line, local_is_translatable(line), line_number)
yield PlainTextFileToken(text=line, is_translatable=local_is_translatable(line), line_number=line_number)

if paragraph:
yield PlainTextFileToken(join_paragraph(paragraph), True, paragraph_start_line)
yield PlainTextFileToken(text=join_paragraph(paragraph), is_translatable=True, line_number=paragraph_start_line)


def join_paragraph(paragraph: Iterable[str]) -> str:
    """Join the accumulated lines of a paragraph into one newline-separated string."""
    return "\n".join(paragraph)
4 changes: 3 additions & 1 deletion df_translation_toolkit/parse/parse_text_set.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from collections.abc import Iterator
from pathlib import Path
from typing import TextIO

from df_translation_toolkit.create_pot.from_speech import extract_from_speech_file
from df_translation_toolkit.parse.parse_raws import split_tag, tokenize_raw_file
from df_translation_toolkit.utils.po_utils import TranslationItem


def skip_text_set_header(file: TextIO) -> None:
Expand All @@ -14,7 +16,7 @@ def skip_text_set_header(file: TextIO) -> None:
return


def extract_from_vanilla_text(file_name: Path, source_encoding: str):
def extract_from_vanilla_text(file_name: Path, source_encoding: str) -> Iterator[TranslationItem]:
with file_name.open(encoding=source_encoding) as file:
skip_text_set_header(file)
for item in extract_from_speech_file(file, file_name.name):
Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/translate/translate_plain_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def translate_plain_text_file(
dictionary: Mapping[str, str],
encoding: str,
join_paragraphs: bool,
):
) -> None:
with source_file_path.open() as source_file:
with destination_file_path.open("w", encoding=encoding) as destination_file:
yield destination_file_path.name
Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/translate/translate_raws.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def translate_raws(po_filename: Path, path: Path, encoding: str) -> Iterator[str
yield from translate_single_raw_file(bak_name, file_path, dictionary, encoding)


def main(po_filename: Path, path: Path, encoding: str) -> None:
    """Translate all raw files under ``path`` using the given po file.

    Each processed file name is echoed to stderr for progress reporting.
    The function has no ``return`` statement, so the correct return
    annotation is ``None`` (the ``-> str`` added by this commit is wrong).
    """
    for filename in translate_raws(po_filename, path, encoding):
        print(filename, file=sys.stderr)
Expand Down
4 changes: 2 additions & 2 deletions df_translation_toolkit/utils/df_ignore_string_rules.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
class IgnoringRuleRegistry:
all_rules: list[Callable[[str], bool]]

def __init__(self) -> None:
    """Create a registry with an empty list of ignoring rules.

    ``__init__`` must be annotated ``-> None``: it initializes the
    instance and never returns a value (the ``-> str`` added by this
    commit is a typo and would be flagged by any type checker).
    """
    self.all_rules = []

def register(self, function: Callable[[str], bool]) -> Callable[[str], bool]:
Expand Down Expand Up @@ -422,5 +422,5 @@ def all_ignore_rules(string: str) -> bool:
return rules.check_ignore(string)


def dont_ignore(_string: str) -> bool:
    """Null ignore-rule: never ignore any string.

    The argument is unused (hence the leading underscore); it exists so
    this function matches the ``Callable[[str], bool]`` rule signature.
    """
    return False
10 changes: 5 additions & 5 deletions df_translation_toolkit/utils/fix_translated_strings.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from unidecode import unidecode_expect_nonascii as unidecode


def fix_leading_spaces(original_string: str, translation: str):
def fix_leading_spaces(original_string: str, translation: str) -> str:
"""
Adds missing space in the beginning of the translation.
Removes extra spaces, if the translation starts with "." or ",".
Expand All @@ -15,7 +15,7 @@ def fix_leading_spaces(original_string: str, translation: str):
return translation


def fix_trailing_spaces(original_string: str, translation: str):
def fix_trailing_spaces(original_string: str, translation: str) -> str:
"""
Adds a missing trailing space.
"""
Expand All @@ -25,7 +25,7 @@ def fix_trailing_spaces(original_string: str, translation: str):
return translation


def fix_spaces(original_string: str, translation: str):
def fix_spaces(original_string: str, translation: str) -> str:
"""
Fixes leading and trailing spaces of the translation string
"""
Expand All @@ -37,7 +37,7 @@ def fix_spaces(original_string: str, translation: str):
_exclusions = "¿¡"


def fix_unicode_symbols(s: str) -> str:
    """Replace unusual unicode characters with ASCII approximations.

    Alphabetic characters and the characters listed in the module-level
    ``_exclusions`` string are kept as-is; every other character is run
    through ``unidecode`` (e.g. fancy quotes and dashes become plain ones).
    """
    return "".join(c if c.isalpha() or c in _exclusions else unidecode(c) for c in s)


Expand All @@ -50,7 +50,7 @@ def fix_unicode_symbols(s: str):
)


def cleanup_string(s):
def cleanup_string(s: str) -> str:
"""
Cleanup a string from unusual unicode characters (quotes, dashes etc.)
"""
Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/utils/maybe_open.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@


@contextmanager
def maybe_open(file_name: str | Path, *args, **kwargs):
def maybe_open(file_name: str | Path, *args, **kwargs) -> None:
file = None
try:
if file_name:
Expand Down
7 changes: 3 additions & 4 deletions df_translation_toolkit/utils/po_utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from collections.abc import Iterable
from dataclasses import dataclass
from typing import BinaryIO, TextIO
from typing import Any, BinaryIO, TextIO

from babel.messages import Catalog
from babel.messages.pofile import read_po, write_po
Expand All @@ -15,11 +15,10 @@ class TranslationItem:
line_number: int | None = None
translator_comment: str | None = None # "#"
extracted_comment: str | None = None # "#."
# reference: Optional[str] = None # "#: source_file: line_number
flag: str | None = None # "#,"
previous_untranslated_msgid: str | None = None # "#|"

def __eq__(self, other):
def __eq__(self, other: object) -> bool:
return (
isinstance(other, TranslationItem)
and self.text == other.text
Expand All @@ -36,7 +35,7 @@ def __eq__(self, other):
""".strip()


def save_pot(po_file: BinaryIO, template: Iterable[TranslationItem]):
def save_pot(po_file: BinaryIO, template: Iterable[TranslationItem]) -> None:
catalog = Catalog()

for item in template:
Expand Down
2 changes: 1 addition & 1 deletion df_translation_toolkit/validation/validation_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def contains_errors(problems: list[ValidationProblem]) -> bool:


class ValidationException(Exception):
def __init__(self, problems: list[ValidationProblem]) -> None:
    """Store the list of validation problems this exception reports."""
    self.problems = problems

def __str__(self) -> str:
Expand Down
3 changes: 3 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,9 @@ select = ["ALL"]
ignore = [
"D",
"RUF001",
"ANN101",
"PTH123",
"T201",
]
fixable = ["ALL"]
unfixable = ["F401"]
Expand Down
1 change: 0 additions & 1 deletion tests/test_fix_translated_strings.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@ def test_fix_leading_spaces(text, translation, fixed):
"text,translation,fixed",
[
("test ", "test", "test "),
# ("test", "test ", "test"),
],
)
def test_fix_trailing_spaces(text, translation, fixed):
Expand Down
2 changes: 1 addition & 1 deletion tests/test_parse_raws.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@
|[TWO_HANDED:27500]
|[MINIMUM_SIZE:22500]
|[MATERIAL_SIZE:1]
|[ATTACK:BLUNT:1:10:lash:lashes:NO_SUB:5000] - 5000 is not translatable
|[ATTACK:BLUNT:1:10:lash:lashes:NO_SUB:5000] - 5000 is not translatable
| [ATTACK_PREPARE_AND_RECOVER:4:4]
| [ATTACK_FLAG_BAD_MULTIATTACK]
""",
Expand Down

0 comments on commit 1f86173

Please sign in to comment.