From b98330d7d1160ad8b6462951ba3acca71dbd24cd Mon Sep 17 00:00:00 2001 From: Ren Tatsumoto Date: Thu, 4 Apr 2024 15:30:35 +0300 Subject: [PATCH] format files --- bulk_add.py | 4 +- config_view.py | 116 +++++++++++++++---------------- context_menu.py | 2 +- definitions.py | 4 +- gui.py | 85 +++++++++++----------- helpers/__init__.py | 20 +++--- helpers/audio_manager.py | 49 ++++++------- helpers/common_kana.py | 34 ++++----- helpers/file_ops.py | 12 ++-- helpers/goldendict_lookups.py | 2 +- helpers/inflections.py | 2 +- helpers/mingle_readings.py | 64 ++++++++--------- helpers/profiles.py | 3 +- helpers/sakura_client.py | 4 +- helpers/sqlite3_buddy.py | 72 +++++++++++-------- helpers/sqlite_schema.py | 7 +- helpers/tokens.py | 20 +++--- helpers/unique_files.py | 2 +- pitch_accents/acc_dict_mgr.py | 12 ++-- pitch_accents/basic_types.py | 24 ++++--- pitch_accents/common.py | 6 +- pitch_accents/format_accents.py | 4 +- pitch_accents/styles.py | 83 +++++++++++----------- pitch_accents/user_accents.py | 11 +-- reading.py | 54 +++++++------- tasks.py | 12 ++-- widgets/anki_style.py | 7 +- widgets/audio_sources.py | 15 ++-- widgets/audio_sources_stats.py | 4 +- widgets/enum_selector.py | 10 +-- widgets/pitch_override_table.py | 26 +++---- widgets/pitch_override_widget.py | 10 +-- widgets/table.py | 6 +- 33 files changed, 398 insertions(+), 388 deletions(-) diff --git a/bulk_add.py b/bulk_add.py index 7566ef1..53d989e 100644 --- a/bulk_add.py +++ b/bulk_add.py @@ -38,13 +38,13 @@ def bulk_add_readings(nids: Sequence[NoteId], parent: Browser) -> None: parent=parent, title="Tasks done", textFormat="rich", - text=f"Processed {len(nids)} selected notes." + text=f"Processed {len(nids)} selected notes.", ) ).run_in_background() def setup_browser_menu(browser: Browser): - """ Add menu entry to browser window """ + """Add menu entry to browser window""" action = QAction(ACTION_NAME, browser) qconnect(action.triggered, lambda: bulk_add_readings(browser.selectedNotes(), parent=browser)) browser.form.menuEdit.addAction(action) diff --git a/config_view.py b/config_view.py index aca8455..f9fc190 100644 --- a/config_view.py +++ b/config_view.py @@ -18,7 +18,7 @@ def split_words(config_value: str) -> list[str]: """Splits string by comma.""" - return re.split(r'[、, ]+', config_value, flags=RE_FLAGS) + return re.split(r"[、, ]+", config_value, flags=RE_FLAGS) class ConfigViewBase(AddonConfigManager): @@ -37,24 +37,24 @@ def write_config(self): class WordBlockListManager(ConfigViewBase): - _NUMBERS = re.compile(r'[一二三四五六七八九十01234567890123456789]+') + _NUMBERS = re.compile(r"[一二三四五六七八九十01234567890123456789]+") @property def _should_skip_numbers(self) -> bool: - return self['skip_numbers'] is True + return self["skip_numbers"] is True @property def blocklisted_words(self) -> list[str]: """Returns a user-defined list of blocklisted words.""" - return split_words(self['blocklisted_words']) + return split_words(self["blocklisted_words"]) def is_blocklisted(self, word: str) -> bool: """Returns True if the user specified that the word should not be looked up.""" - - return ( - to_katakana(word) in map(to_katakana, self.blocklisted_words) - or (self._should_skip_numbers and re.fullmatch(self._NUMBERS, word)) - ) + if to_katakana(word) in map(to_katakana, self.blocklisted_words): + return True + if self._should_skip_numbers and re.fullmatch(self._NUMBERS, word): + return True + return False @enum.unique @@ -67,29 +67,29 @@ class ReadingsDiscardMode(enum.Enum): class PitchAndFuriganaCommon(WordBlockListManager): @property def 
maximum_results(self) -> int: - return int(self['maximum_results']) + return int(self["maximum_results"]) @property def reading_separator(self) -> str: - return self['reading_separator'] + return self["reading_separator"] @property def discard_mode(self) -> ReadingsDiscardMode: - return ReadingsDiscardMode[self['discard_mode']] + return ReadingsDiscardMode[self["discard_mode"]] @final class FuriganaConfigView(PitchAndFuriganaCommon): - _view_key = 'furigana' + _view_key = "furigana" @property def prefer_literal_pronunciation(self) -> bool: - return self['prefer_literal_pronunciation'] is True + return self["prefer_literal_pronunciation"] is True @property def mecab_only(self) -> list[str]: """Words that shouldn't be looked up in the accent dictionary.""" - return split_words(self['mecab_only']) + return split_words(self["mecab_only"]) def can_lookup_in_db(self, word: str) -> bool: return self.maximum_results > 1 and word not in self.mecab_only @@ -97,48 +97,48 @@ def can_lookup_in_db(self, word: str) -> bool: @final class PitchConfigView(PitchAndFuriganaCommon): - _view_key = 'pitch_accent' + _view_key = "pitch_accent" @property def lookup_shortcut(self) -> str: - return self['lookup_shortcut'] + return self["lookup_shortcut"] @property def output_hiragana(self) -> bool: - return self['output_hiragana'] is True + return self["output_hiragana"] is True @property def kana_lookups(self) -> bool: - return self['kana_lookups'] is True + return self["kana_lookups"] is True @property def word_separator(self) -> str: - return self['word_separator'] + return self["word_separator"] @property def style(self) -> PitchPatternStyle: - return PitchPatternStyle[self['style']] + return PitchPatternStyle[self["style"]] @final class ContextMenuConfigView(ConfigViewBase): - _view_key = 'context_menu' + _view_key = "context_menu" @property def generate_furigana(self) -> bool: - return self['generate_furigana'] is True + return self["generate_furigana"] is True @property def to_katakana(self) -> bool: - return self['to_katakana'] is True + return self["to_katakana"] is True @property def to_hiragana(self) -> bool: - return self['to_hiragana'] is True + return self["to_hiragana"] is True @property def literal_pronunciation(self) -> bool: - return self['literal_pronunciation'] is True + return self["literal_pronunciation"] is True class ToolbarButtonConfig(NamedTuple): @@ -149,7 +149,7 @@ class ToolbarButtonConfig(NamedTuple): @final class ToolbarConfigView(ConfigViewBase): - _view_key = 'toolbar' + _view_key = "toolbar" def __getitem__(self, item) -> ToolbarButtonConfig: try: @@ -159,113 +159,113 @@ def __getitem__(self, item) -> ToolbarButtonConfig: @property def generate_all_button(self) -> ToolbarButtonConfig: - return self['generate_all_button'] + return self["generate_all_button"] @property def regenerate_all_button(self) -> ToolbarButtonConfig: - return self['regenerate_all_button'] + return self["regenerate_all_button"] @property def furigana_button(self) -> ToolbarButtonConfig: - return self['furigana_button'] + return self["furigana_button"] @property def hiragana_button(self) -> ToolbarButtonConfig: - return self['hiragana_button'] + return self["hiragana_button"] @property def clean_furigana_button(self) -> ToolbarButtonConfig: - return self['clean_furigana_button'] + return self["clean_furigana_button"] @property def audio_search_button(self) -> ToolbarButtonConfig: - return self['audio_search_button'] + return self["audio_search_button"] @property def add_definition_button(self) -> 
ToolbarButtonConfig: - return self['add_definition_button'] + return self["add_definition_button"] @final class AudioSettingsConfigView(ConfigViewBase): - _view_key = 'audio_settings' + _view_key = "audio_settings" @property def dictionary_download_timeout(self) -> int: - return self['dictionary_download_timeout'] + return self["dictionary_download_timeout"] @property def audio_download_timeout(self) -> int: - return self['audio_download_timeout'] + return self["audio_download_timeout"] @property def attempts(self) -> int: - return self['attempts'] + return self["attempts"] @property def maximum_results(self) -> int: - return self['maximum_results'] + return self["maximum_results"] @property def ignore_inflections(self) -> bool: - return bool(self['ignore_inflections']) + return bool(self["ignore_inflections"]) @property def stop_if_one_source_has_results(self) -> bool: - return bool(self['stop_if_one_source_has_results']) + return bool(self["stop_if_one_source_has_results"]) @property def search_dialog_dest_field_name(self) -> str: - return self['search_dialog_dest_field_name'] + return self["search_dialog_dest_field_name"] @search_dialog_dest_field_name.setter def search_dialog_dest_field_name(self, field_name: str): - self['search_dialog_dest_field_name'] = field_name + self["search_dialog_dest_field_name"] = field_name @property def search_dialog_src_field_name(self) -> str: - return self['search_dialog_src_field_name'] + return self["search_dialog_src_field_name"] @search_dialog_src_field_name.setter def search_dialog_src_field_name(self, field_name: str): - self['search_dialog_src_field_name'] = field_name + self["search_dialog_src_field_name"] = field_name @property def tag_separator(self) -> str: - return self['tag_separator'] + return self["tag_separator"] @final class DefinitionsConfigView(ConfigViewBase): - _view_key = 'definitions' + _view_key = "definitions" @property def timeout(self) -> int: - return self['timeout'] + return self["timeout"] @property def remove_marks(self) -> bool: - return bool(self['remove_marks']) + return bool(self["remove_marks"]) @property def dict_name(self) -> DictName: - return DictName[self['dict_name']] + return DictName[self["dict_name"]] @property def search_type(self) -> SearchType: - return SearchType[self['search_type']] + return SearchType[self["search_type"]] @property def source(self) -> str: - return self['source'] + return self["source"] @property def destination(self) -> str: - return self['destination'] + return self["destination"] @property def behavior(self) -> AddDefBehavior: - return AddDefBehavior[self['behavior']] + return AddDefBehavior[self["behavior"]] @final @@ -280,11 +280,11 @@ def __init__(self): self._definitions = DefinitionsConfigView() def iter_profiles(self) -> Iterable[Profile]: - for profile_dict in self['profiles']: + for profile_dict in self["profiles"]: # In case new options are added or removed in the future, # load default settings first, then overwrite them. 
yield dataclasses.replace( - default := Profile.get_default(profile_dict['mode']), + default := Profile.get_default(profile_dict["mode"]), **{key: profile_dict[key] for key in (dataclasses.asdict(default).keys() & profile_dict.keys())}, ) @@ -298,11 +298,11 @@ def audio_settings(self): @property def audio_sources(self): - return self['audio_sources'] + return self["audio_sources"] @property def cache_lookups(self) -> int: - return int(self['cache_lookups']) + return int(self["cache_lookups"]) @property def furigana(self) -> FuriganaConfigView: diff --git a/context_menu.py b/context_menu.py index b43299c..d975332 100644 --- a/context_menu.py +++ b/context_menu.py @@ -18,7 +18,7 @@ class ContextMenuAction(abc.ABC): - subclasses: list[type['ContextMenuAction']] = [] + subclasses: list[type["ContextMenuAction"]] = [] shown_when_not_editing = False def __init_subclass__(cls, **kwargs): diff --git a/definitions.py b/definitions.py index 970a2a0..c880391 100644 --- a/definitions.py +++ b/definitions.py @@ -69,9 +69,9 @@ def add_definition(self, editor: Editor) -> None: for field_name in (self._config.source, self._config.destination): if field_name not in editor.note: - return tooltip(f"Note doesn't have field \"{field_name}\".") + return tooltip(f'Note doesn\'t have field "{field_name}".') if not editor.note[self._config.source]: - return tooltip(f"Source field \"{self._config.source}\" is empty.") + return tooltip(f'Source field "{self._config.source}" is empty.') progress = create_progress_dialog(editor.parentWindow) diff --git a/gui.py b/gui.py index 45276b9..2e0faea 100644 --- a/gui.py +++ b/gui.py @@ -69,8 +69,8 @@ def relevant_field_names(note_type_name_fuzzy: Optional[str] = None) -> Iterable """ for model in mw.col.models.all_names_and_ids(): if not note_type_name_fuzzy or note_type_name_fuzzy.lower() in model.name.lower(): - for field in mw.col.models.get(model.id)['flds']: - yield field['name'] + for field in mw.col.models.get(model.id)["flds"]: + yield field["name"] class EditableSelector(QComboBox): @@ -195,14 +195,14 @@ def set_enabled_callers(self, callers: Collection[TaskCaller]): return self.setCheckedData(callers) def comma_separated_callers(self): - return ','.join(caller.name for caller in self.checkedData()) + return ",".join(caller.name for caller in self.checkedData()) class ProfileEditForm(QGroupBox): _subclasses_map = {} # e.g. ProfileFurigana => FuriganaProfileEditForm def __init_subclass__(cls, **kwargs): - profile_class: type(Profile) = kwargs.pop('profile_class') # suppresses ide warning + profile_class: type(Profile) = kwargs.pop("profile_class") # suppresses ide warning super().__init_subclass__(**kwargs) cls._subclasses_map[profile_class] = cls @@ -243,15 +243,9 @@ def _add_tooltips(self): "Profile will be triggered for Note Type names that contain this string.\n" "Note Type name matching is case-insensitive." ) - self._form.source.setToolTip( - "Name of the field to get data from, i.e. the raw expression." - ) - self._form.destination.setToolTip( - "Name of the field to place generated data to." - ) - self._form.triggered_by.setToolTip( - "Names of Anki actions that can trigger this profile's task." - ) + self._form.source.setToolTip("Name of the field to get data from, i.e. 
the raw expression.") + self._form.destination.setToolTip("Name of the field to place generated data to.") + self._form.triggered_by.setToolTip("Names of Anki actions that can trigger this profile's task.") self._form.split_morphemes.setToolTip( "If the source field contains multiple words, try to identify and parse each word.\n" "Recommended to disable for vocabulary fields." @@ -282,7 +276,7 @@ def _make_layout(self) -> QLayout: return layout def _repopulate_fields(self, profile: Optional[Profile] = None): - for key in ('source', 'destination',): + for key in ("source", "destination"): widget: QComboBox = self._form.__dict__[key] current_text = dataclasses.asdict(profile)[key] if profile else widget.currentText() widget.clear() @@ -311,7 +305,7 @@ class AudioProfileEditForm(ProfileEditForm, profile_class=ProfileAudio): class ProfileEdit(QWidget): def __init_subclass__(cls, **kwargs): - cls._profile_class: type(Profile) = kwargs.pop('profile_class') # suppresses ide warning + cls._profile_class: type(Profile) = kwargs.pop("profile_class") # suppresses ide warning super().__init_subclass__(**kwargs) def __init__(self, *args, **kwargs): @@ -379,10 +373,10 @@ def _adjust_font_size(self): def set_values(self, values: list[str]): if values: - self.setPlainText(','.join(dict.fromkeys(values))) + self.setPlainText(",".join(dict.fromkeys(values))) def as_text(self) -> str: - return ','.join(dict.fromkeys(filter(bool, self.toPlainText().replace(' ', '').split('\n')))) + return ",".join(dict.fromkeys(filter(bool, self.toPlainText().replace(" ", "").split("\n")))) class SettingsForm(QWidget): @@ -459,10 +453,10 @@ class DefinitionsSettingsForm(SettingsForm): def _add_widgets(self): super()._add_widgets() self._widgets.source = FieldNameSelector( - initial_value=self._config.source + initial_value=self._config.source, ) self._widgets.destination = FieldNameSelector( - initial_value=self._config.destination + initial_value=self._config.destination, ) self._widgets.dict_name = EnumSelectCombo( enum_type=DictName, @@ -471,27 +465,27 @@ def _add_widgets(self): ) self._widgets.search_type = EnumSelectCombo( enum_type=SearchType, - initial_value=self._config.search_type + initial_value=self._config.search_type, ) self._widgets.behavior = EnumSelectCombo( enum_type=AddDefBehavior, - initial_value=self._config.behavior + initial_value=self._config.behavior, ) self._widgets.timeout = NarrowSpinBox( - initial_value=self._config.timeout + initial_value=self._config.timeout, ) def _add_tooltips(self): super()._add_tooltips() self._widgets.timeout.setToolTip( - "Download timeout in seconds." + "Download timeout in seconds.", ) self._widgets.remove_marks.setToolTip( "Strip all tags from definitions.\n" "Usually tags simply repeat the headword and are not needed." ) self._widgets.dict_name.setToolTip( - "Dictionary to fetch definitions from." 
+ "Dictionary to fetch definitions from.", ) self._widgets.search_type.setToolTip( "How to search.\n" @@ -554,14 +548,16 @@ class PitchSettingsForm(MultiColumnSettingsForm): def _add_widgets(self): super()._add_widgets() - self._widgets.maximum_results = NarrowSpinBox(initial_value=self._config.maximum_results) + self._widgets.maximum_results = NarrowSpinBox( + initial_value=self._config.maximum_results, + ) self._widgets.discard_mode = EnumSelectCombo( enum_type=ReadingsDiscardMode, - initial_value=self._config.discard_mode + initial_value=self._config.discard_mode, ) self._widgets.style = EnumSelectCombo( enum_type=PitchPatternStyle, - initial_value=self._config.style + initial_value=self._config.style, ) self._widgets.reading_separator = NarrowLineEdit(self._config.reading_separator) self._widgets.word_separator = NarrowLineEdit(self._config.word_separator) @@ -605,7 +601,7 @@ def _add_tooltips(self): ) self._widgets.style.setToolTip( "Style of pitch accent patterns.\n" - "If set to \"none\", you can configure your own styles\n" + 'If set to "none", you can configure your own styles\n' "in the Styling section of your card type using CSS class names." ) @@ -619,7 +615,7 @@ def _add_widgets(self): self._widgets.maximum_results = NarrowSpinBox(initial_value=self._config.maximum_results) self._widgets.discard_mode = EnumSelectCombo( enum_type=ReadingsDiscardMode, - initial_value=self._config.discard_mode + initial_value=self._config.discard_mode, ) self._widgets.reading_separator = NarrowLineEdit(self._config.reading_separator) self._widgets.blocklisted_words = WordsEdit(initial_values=self._config.blocklisted_words) @@ -864,7 +860,7 @@ def _add_tooltips(self): class SettingsDialog(QDialog): - name = 'Japanese Options' + name = "Japanese Options" def __init__(self, *args): super().__init__(*args) @@ -893,9 +889,9 @@ def __init__(self, *args): # Finish layout self._tabs = QTabWidget() self._button_box = QDialogButtonBox( - QDialogButtonBox.StandardButton.Ok | - QDialogButtonBox.StandardButton.Cancel | - QDialogButtonBox.StandardButton.Help + QDialogButtonBox.StandardButton.Ok + | QDialogButtonBox.StandardButton.Cancel + | QDialogButtonBox.StandardButton.Help ) self._setup_tabs() self._add_tooltips() @@ -947,7 +943,7 @@ def _setup_tabs(self): def _setup_ui(self) -> None: cast(QDialog, self).setWindowModality(Qt.WindowModality.ApplicationModal) - cast(QDialog, self).setWindowTitle(f'{ADDON_SERIES} {self.name}') + cast(QDialog, self).setWindowTitle(f"{ADDON_SERIES} {self.name}") self.setMinimumSize(800, 600) tweak_window(self) self.setLayout(self.make_layout()) @@ -977,21 +973,18 @@ def make_layout(self) -> QLayout: return layout def accept(self) -> None: - cfg['pitch_accent'].update(self._pitch_settings.as_dict()) - cfg['furigana'].update(self._furigana_settings.as_dict()) - cfg['context_menu'].update(self._context_menu_settings.as_dict()) - cfg['definitions'].update(self._definitions_settings.as_dict()) - cfg['toolbar'].update(self._toolbar_settings.as_dict()) - cfg['profiles'] = [ + cfg["pitch_accent"].update(self._pitch_settings.as_dict()) + cfg["furigana"].update(self._furigana_settings.as_dict()) + cfg["context_menu"].update(self._context_menu_settings.as_dict()) + cfg["definitions"].update(self._definitions_settings.as_dict()) + cfg["toolbar"].update(self._toolbar_settings.as_dict()) + cfg["profiles"] = [ *self._furigana_profiles_edit.as_list(), *self._pitch_profiles_edit.as_list(), *self._audio_profiles_edit.as_list(), ] - cfg['audio_sources'] = [ - dataclasses.asdict(source) - for 
source in self._audio_sources_edit.iterateConfigs() - ] - cfg['audio_settings'].update(self._audio_settings.as_dict()) + cfg["audio_sources"] = [dataclasses.asdict(source) for source in self._audio_sources_edit.iterateConfigs()] + cfg["audio_settings"].update(self._audio_settings.as_dict()) # Write the new data to disk cfg.write_config() self._accents_override.save_to_disk() @@ -1002,7 +995,7 @@ def accept(self) -> None: def add_settings_action(root_menu: QMenu): - menu_action = QAction(f'{SettingsDialog.name}...', root_menu) + menu_action = QAction(f"{SettingsDialog.name}...", root_menu) qconnect(menu_action.triggered, lambda: SettingsDialog(mw)) root_menu.addAction(menu_action) diff --git a/helpers/__init__.py b/helpers/__init__.py index 1fb6687..9aafd02 100644 --- a/helpers/__init__.py +++ b/helpers/__init__.py @@ -12,32 +12,32 @@ # Running as a standalone script. ADDON_SERIES = "TEST" -LONG_VOWEL_MARK = 'ー' +LONG_VOWEL_MARK = "ー" ADDON_NAME = f"{ADDON_SERIES} Japanese" def ui_translate(key: str) -> str: - return key.capitalize().replace('_', ' ') + return key.capitalize().replace("_", " ") T = TypeVar("T") def split_list(input_list: Sequence[T], n_chunks: int) -> Iterable[Sequence[T]]: - """ Splits a list into N chunks. """ + """Splits a list into N chunks.""" chunk_size = math.ceil(len(input_list) / n_chunks) for i in range(0, len(input_list), chunk_size): - yield input_list[i:i + chunk_size] + yield input_list[i : i + chunk_size] def main(): - assert (list(split_list([1, 2, 3], n_chunks=2)) == [[1, 2], [3]]) - assert (list(split_list([1, 2, 3, 4], n_chunks=2)) == [[1, 2], [3, 4]]) - assert (list(split_list([1, 2, 3, 4, 5], n_chunks=2)) == [[1, 2, 3], [4, 5]]) - assert (list(split_list([1, 2, 3, 4, 5, 6, 7], n_chunks=2)) == [[1, 2, 3, 4], [5, 6, 7]]) - assert (list(split_list([1, 2, 3, 4, 5, 6, 7, 8], n_chunks=3)) == [[1, 2, 3], [4, 5, 6], [7, 8]]) + assert list(split_list([1, 2, 3], n_chunks=2)) == [[1, 2], [3]] + assert list(split_list([1, 2, 3, 4], n_chunks=2)) == [[1, 2], [3, 4]] + assert list(split_list([1, 2, 3, 4, 5], n_chunks=2)) == [[1, 2, 3], [4, 5]] + assert list(split_list([1, 2, 3, 4, 5, 6, 7], n_chunks=2)) == [[1, 2, 3, 4], [5, 6, 7]] + assert list(split_list([1, 2, 3, 4, 5, 6, 7, 8], n_chunks=3)) == [[1, 2, 3], [4, 5, 6], [7, 8]] print("Passed.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/audio_manager.py b/helpers/audio_manager.py index f8d257e..4cb6bce 100644 --- a/helpers/audio_manager.py +++ b/helpers/audio_manager.py @@ -20,7 +20,7 @@ AudioSourceConfig, AudioManagerHttpClient, FileUrlData, - AudioSettingsProtocol + AudioSettingsProtocol, ) from ..pitch_accents.common import split_pitch_numbers from .audio_json_schema import FileInfo @@ -33,7 +33,7 @@ AudioSourceConfig, AudioManagerHttpClient, FileUrlData, - AudioSettingsProtocol + AudioSettingsProtocol, ) from pitch_accents.common import split_pitch_numbers from helpers.audio_json_schema import FileInfo @@ -55,7 +55,7 @@ def file_exists(file_path: str): def cut_to_anki_size(text: str) -> str: - return text.encode('utf-8')[:MAX_LEN_BYTES].decode('utf-8', errors='ignore') + return text.encode("utf-8")[:MAX_LEN_BYTES].decode("utf-8", errors="ignore") def normalize_filename(text: str) -> str: @@ -64,9 +64,10 @@ def normalize_filename(text: str) -> str: ensure there are no questionable characters that some OSes may panic from. 
""" import unicodedata + text = cut_to_anki_size(text) - text = unicodedata.normalize('NFC', text) - text = re.sub(RE_FILENAME_PROHIBITED, '_', text) + text = unicodedata.normalize("NFC", text) + text = re.sub(RE_FILENAME_PROHIBITED, "_", text) return text.strip() @@ -76,7 +77,7 @@ def norm_pitch_numbers(s: str) -> str: When an audio file has more than one accent, it basically represents two or more words chained together. E.g., かも-知れない (1-0), 黒い-霧 (2-0), 作用,反作用の,法則 (1-3-0), 八幡,大菩薩 (2-3), 入り代わり-立ち代わり (0-0), 七転,八起き (3-1) """ - return '-'.join(split_pitch_numbers(s)) or '?' + return "-".join(split_pitch_numbers(s)) or "?" @dataclasses.dataclass @@ -100,7 +101,7 @@ def media_dir(self) -> str: ) def join(self, *args) -> Union[str, bytes]: - """ Join multiple paths. """ + """Join multiple paths.""" if self.is_local: # Local paths are platform-dependent. return os.path.join(*args) @@ -171,16 +172,16 @@ class AddonConfigProtocol(Protocol): audio_sources: dict audio_settings: AudioSettingsProtocol - def iter_audio_sources(self): - ... + def iter_audio_sources(self): ... class AudioSourceManager: def __init__( - self, config: AddonConfigProtocol, - http_client: Optional[AudioManagerHttpClient], - db: Sqlite3Buddy, - audio_sources: list[AudioSource], + self, + config: AddonConfigProtocol, + http_client: Optional[AudioManagerHttpClient], + db: Sqlite3Buddy, + audio_sources: list[AudioSource], ): self._config = config self._http_client = http_client @@ -245,10 +246,10 @@ def _resolve_file(self, source: AudioSource, file: BoundFile) -> FileUrlData: file_info: FileInfo = self._db.get_file_info(source.name, file.file_name) # Append either pitch pattern or kana reading, preferring pitch pattern. - if file_info['pitch_pattern']: - components.append(to_katakana(file_info['pitch_pattern'])) - elif file_info['kana_reading']: - components.append(to_katakana(file_info['kana_reading'])) + if file_info["pitch_pattern"]: + components.append(to_katakana(file_info["pitch_pattern"])) + elif file_info["kana_reading"]: + components.append(to_katakana(file_info["kana_reading"])) # If pitch number is present, append it after reading. if file_info['pitch_number']: @@ -262,19 +263,19 @@ def _resolve_file(self, source: AudioSource, file: BoundFile) -> FileUrlData: desired_filename=desired_filename, word=file.headword, source_name=source.name, - reading=(file_info['kana_reading'] or ""), - pitch_number=(file_info['pitch_number'] or "?"), + reading=(file_info["kana_reading"] or ""), + pitch_number=(file_info["pitch_number"] or "?"), ) def _read_local_json(self, source: AudioSource): - if source.url.endswith('.zip'): + if source.url.endswith(".zip"): # Read from a zip file that is expected to contain a json file with audio source data. with zipfile.ZipFile(source.url) as zip_in: print(f"Reading local zip audio source: {source.url}") self.db.insert_data(source.name, json.loads(read_zip(zip_in, source))) else: # Read an uncompressed json file. 
- with open(source.url, encoding='utf8') as f: + with open(source.url, encoding="utf8") as f: print(f"Reading local json audio source: {source.url}") self.db.insert_data(source.name, json.load(f)) @@ -290,7 +291,7 @@ def _download_remote_json(self, source: AudioSource): def _get_file(self, file: FileUrlData) -> bytes: if os.path.isfile(file.url): - with open(file.url, 'rb') as f: + with open(file.url, "rb") as f: return f.read() else: return self._http_client.download(file) @@ -393,11 +394,11 @@ def main(): print(f"{stats.unique_headwords=}") for source_stats in stats.sources: print(source_stats) - for file in aud_mgr.search_word('ひらがな'): + for file in aud_mgr.search_word("ひらがな"): print(file) for source in aud_mgr.audio_sources: print(f"source {source.name} media dir {source.media_dir}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/common_kana.py b/helpers/common_kana.py index a44b3d6..ee2be7f 100644 --- a/helpers/common_kana.py +++ b/helpers/common_kana.py @@ -40,25 +40,25 @@ def adjust_to_inflection(raw_word: str, headword: str, headword_reading: str) -> def main(): - assert (adjust_to_inflection('食べた', '食べる', 'たべる') == 'たべた') - assert (adjust_to_inflection('跪いた', '跪く', 'ひざまずく') == 'ひざまずいた') - assert (adjust_to_inflection('跪かなかった', '跪く', 'ひざまずく') == 'ひざまずかなかった') - assert (adjust_to_inflection('安くなかった', '安い', 'やすい') == 'やすくなかった') - assert (adjust_to_inflection('繋りたい', '繋る', 'つながる') == 'つながりたい') - assert (adjust_to_inflection('言い方', '言い方', 'いいかた') == 'いいかた') - assert (adjust_to_inflection('やり遂げさせられない', 'やり遂げる', 'やりとげる') == 'やりとげさせられない') - assert (adjust_to_inflection('死ん', '死ぬ', 'しぬ') == 'しん') - assert (adjust_to_inflection('たべた', 'たべる', 'たべる') == 'たべた') - assert (adjust_to_inflection('カタカナ', 'カタカナ', 'かたかな') == 'カタカナ') - assert (adjust_to_inflection("相合い傘", "相合い傘", "あいあいがさ") == "あいあいがさ") - assert (adjust_to_inflection("いた目", "板目", "いため") == "いため") - assert (adjust_to_inflection("軽そう", "軽装", "けいそー") == "けいそー") - assert (adjust_to_inflection("唸りました", "唸る", "うなる") == "うなりました") - assert (adjust_to_inflection("可愛くない", "可愛い", "かわいい") == "かわいくない") - assert (adjust_to_inflection("かわいくない", "可愛い", "かわいい") == "かわいくない") + assert adjust_to_inflection("食べた", "食べる", "たべる") == "たべた" + assert adjust_to_inflection("跪いた", "跪く", "ひざまずく") == "ひざまずいた" + assert adjust_to_inflection("跪かなかった", "跪く", "ひざまずく") == "ひざまずかなかった" + assert adjust_to_inflection("安くなかった", "安い", "やすい") == "やすくなかった" + assert adjust_to_inflection("繋りたい", "繋る", "つながる") == "つながりたい" + assert adjust_to_inflection("言い方", "言い方", "いいかた") == "いいかた" + assert adjust_to_inflection("やり遂げさせられない", "やり遂げる", "やりとげる") == "やりとげさせられない" + assert adjust_to_inflection("死ん", "死ぬ", "しぬ") == "しん" + assert adjust_to_inflection("たべた", "たべる", "たべる") == "たべた" + assert adjust_to_inflection("カタカナ", "カタカナ", "かたかな") == "カタカナ" + assert adjust_to_inflection("相合い傘", "相合い傘", "あいあいがさ") == "あいあいがさ" + assert adjust_to_inflection("いた目", "板目", "いため") == "いため" + assert adjust_to_inflection("軽そう", "軽装", "けいそー") == "けいそー" + assert adjust_to_inflection("唸りました", "唸る", "うなる") == "うなりました" + assert adjust_to_inflection("可愛くない", "可愛い", "かわいい") == "かわいくない" + assert adjust_to_inflection("かわいくない", "可愛い", "かわいい") == "かわいくない" print("Passed.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/file_ops.py b/helpers/file_ops.py index 00048a0..43cc648 100644 --- a/helpers/file_ops.py +++ b/helpers/file_ops.py @@ -17,26 +17,26 @@ def walk_parents(current_dir: str) -> Iterable[str]: def resolve_relative_path(*paths) -> str: - """ 
Return path to file inside the add-on's dir. """ + """Return path to file inside the add-on's dir.""" for parent_dir in walk_parents(__file__): if os.path.basename(parent_dir) == THIS_ADDON_MODULE: return os.path.join(parent_dir, *paths) def touch(path): - with open(path, 'a'): + with open(path, "a"): os.utime(path, None) def find_config_json() -> str: - """ Used when testing/debugging. """ + """Used when testing/debugging.""" for parent_dir in walk_parents(__file__): if os.path.isfile(path := os.path.join(parent_dir, "config.json")): return path def user_files_dir() -> str: - """ Return path to the user files directory. """ + """Return path to the user files directory.""" for parent_dir in walk_parents(__file__): if os.path.isdir(dir_path := os.path.join(parent_dir, "user_files")): return dir_path @@ -58,6 +58,6 @@ def open_file(path: str) -> None: QDesktopServices.openUrl(QUrl(f"file://{path}")) -if __name__ == '__main__': +if __name__ == "__main__": print(user_files_dir()) - print(open_file('/etc/hosts')) + print(open_file("/etc/hosts")) diff --git a/helpers/goldendict_lookups.py b/helpers/goldendict_lookups.py index 86affbf..fded755 100644 --- a/helpers/goldendict_lookups.py +++ b/helpers/goldendict_lookups.py @@ -41,5 +41,5 @@ def main(): lookup_goldendict("肉じゃが") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/inflections.py b/helpers/inflections.py index 6d0cc1d..64df653 100644 --- a/helpers/inflections.py +++ b/helpers/inflections.py @@ -50,5 +50,5 @@ def main(): print("Ok.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/mingle_readings.py b/helpers/mingle_readings.py index 448ddf2..1c05b18 100644 --- a/helpers/mingle_readings.py +++ b/helpers/mingle_readings.py @@ -6,7 +6,7 @@ from collections.abc import Iterable from typing import NamedTuple, Final, Union -MULTIPLE_READING_SEP: Final[str] = '・' +MULTIPLE_READING_SEP: Final[str] = "・" class SplitFurigana(NamedTuple): @@ -31,7 +31,7 @@ class WordReading(NamedTuple): def strip_non_jp_furigana(expr: str) -> str: """Non-japanese furigana is not real furigana. 
Strip it.""" - return re.sub(r'\[[^ぁ-ゖァ-ヺァ-ン]+]', '', expr) + return re.sub(r"\[[^ぁ-ゖァ-ヺァ-ン]+]", "", expr) def find_head_reading_suffix(text: str) -> Union[SplitFurigana, NoFurigana]: @@ -41,13 +41,13 @@ def find_head_reading_suffix(text: str) -> Union[SplitFurigana, NoFurigana]: """ furigana_start, furigana_end = -1, -1 for i, c in enumerate(text): - if c == '[': + if c == "[": furigana_start = i - if c == ']': + if c == "]": furigana_end = i break if 0 < furigana_start < furigana_end: - return SplitFurigana(text[:furigana_start], text[furigana_start + 1:furigana_end], text[furigana_end + 1:]) + return SplitFurigana(text[:furigana_start], text[furigana_start + 1 : furigana_end], text[furigana_end + 1 :]) else: return NoFurigana(text) @@ -78,9 +78,9 @@ def decompose_word(text: str) -> SplitFurigana: def tie_inside_furigana(s: str) -> str: def fixup(m: re.Match): - return m.group().replace(' ', MULTIPLE_READING_SEP) + return m.group().replace(" ", MULTIPLE_READING_SEP) - return re.sub(r'\[[^\[\]]+?]', fixup, s) + return re.sub(r"\[[^\[\]]+?]", fixup, s) def whitespace_split(furigana_notation: str) -> list[str]: @@ -98,15 +98,15 @@ def word_reading(text: str) -> WordReading: for split in map(decompose_word, whitespace_split(text)): word.append(split.head + split.suffix) reading.append(split.reading + split.suffix) - word, reading = ''.join(word), ''.join(reading) - return WordReading(word, reading) if (reading and word != reading) else WordReading(text, '') + word, reading = "".join(word), "".join(reading) + return WordReading(word, reading) if (reading and word != reading) else WordReading(text, "") def pairs(seq: list): yield from zip(seq, seq[1:]) -def mingle_readings(words_furigana: list[str], *, sep: str = ', ') -> str: +def mingle_readings(words_furigana: list[str], *, sep: str = ", ") -> str: """ Takes several furigana notations, packs them into one, with readings separated by sep. 
@@ -126,8 +126,8 @@ def mingle_readings(words_furigana: list[str], *, sep: str = ', ') -> str:
     for first, *rest in zip(*split):
         first = decompose_word(first)
         words_furigana = sep.join(dict.fromkeys(word.reading for word in (first, *map(decompose_word, rest))))
-        packs.append(f' {first.head}[{words_furigana}]{first.suffix}' if words_furigana != first.head else first.head)
-    return ''.join(packs)
+        packs.append(f" {first.head}[{words_furigana}]{first.suffix}" if words_furigana != first.head else first.head)
+    return "".join(packs)
 
 
 # Debug
@@ -135,33 +135,33 @@ def mingle_readings(words_furigana: list[str], *, sep: str = ', ') -> str:
 
 
 def main():
-    assert (whitespace_split(' 有[あ]り 得[う]る') == ['有[あ]り', '得[う]る'])
+    assert whitespace_split(" 有[あ]り 得[う]る") == ["有[あ]り", "得[う]る"]
 
-    assert (strip_non_jp_furigana('悪[わる][1223]い[2]') == '悪[わる]い')
+    assert strip_non_jp_furigana("悪[わる][1223]い[2]") == "悪[わる]い"
 
-    assert (decompose_word('故郷[こきょう]') == SplitFurigana(head='故郷', reading='こきょう', suffix=''))
-    assert (decompose_word('有[あ]り') == SplitFurigana(head='有', reading='あ', suffix='り'))
-    assert (decompose_word('ひらがな') == SplitFurigana(head='ひらがな', reading='ひらがな', suffix=''))
-    assert (decompose_word('南[みなみ]千[ち]秋[あき]') == SplitFurigana(head='南千秋', reading='みなみちあき', suffix=''))
+    assert decompose_word("故郷[こきょう]") == SplitFurigana(head="故郷", reading="こきょう", suffix="")
+    assert decompose_word("有[あ]り") == SplitFurigana(head="有", reading="あ", suffix="り")
+    assert decompose_word("ひらがな") == SplitFurigana(head="ひらがな", reading="ひらがな", suffix="")
+    assert decompose_word("南[みなみ]千[ち]秋[あき]") == SplitFurigana(head="南千秋", reading="みなみちあき", suffix="")
 
-    assert (word_reading('有[あ]り 得[う]る') == WordReading(word='有り得る', reading='ありうる'))
-    assert (word_reading('有る') == WordReading(word='有る', reading=''))
-    assert (word_reading('お 前[まい・まえ・めえ]') == WordReading(word='お前', reading='おまい・まえ・めえ'))
-    assert (
-        word_reading('もうお 金[かね]が 無[な]くなりました。')
-        == WordReading(word='もうお金が無くなりました。', reading='もうおかねがなくなりました。')
+    assert word_reading("有[あ]り 得[う]る") == WordReading(word="有り得る", reading="ありうる")
+    assert word_reading("有る") == WordReading(word="有る", reading="")
+    assert word_reading("お 前[まい・まえ・めえ]") == WordReading(word="お前", reading="おまい・まえ・
めえ") + assert word_reading("もうお 金[かね]が 無[な]くなりました。") == WordReading( + word="もうお金が無くなりました。", reading="もうおかねがなくなりました。" ) + assert word_reading( + "妹[いもうと]は 自分[じぶん]の 我[わ]が 儘[まま]が 通[とお]らないと、すぐ 拗[す]ねる。" + ) == WordReading("妹は自分の我が儘が通らないと、すぐ拗ねる。", "いもうとはじぶんのわがままがとおらないと、すぐすねる。") + assert ( - word_reading("妹[いもうと]は 自分[じぶん]の 我[わ]が 儘[まま]が 通[とお]らないと、すぐ 拗[す]ねる。") - == WordReading("妹は自分の我が儘が通らないと、すぐ拗ねる。", "いもうとはじぶんのわがままがとおらないと、すぐすねる。") + mingle_readings([" 有[あ]り 得[う]る", " 有[あ]り 得[え]る", " 有[あ]り 得[え]る"]) == " 有[あ]り 得[う, え]る" ) - - assert (mingle_readings([' 有[あ]り 得[う]る', ' 有[あ]り 得[え]る', ' 有[あ]り 得[え]る']) == ' 有[あ]り 得[う, え]る') - assert (mingle_readings([' 故郷[こきょう]', ' 故郷[ふるさと]']) == ' 故郷[こきょう, ふるさと]') - assert (mingle_readings(['お 前[まえ]', 'お 前[めえ]']) == 'お 前[まえ, めえ]') - assert (mingle_readings([' 言[い]い 分[ぶん]', ' 言い分[いーぶん]']) == ' 言[い]い 分[ぶん]') + assert mingle_readings([" 故郷[こきょう]", " 故郷[ふるさと]"]) == " 故郷[こきょう, ふるさと]" + assert mingle_readings(["お 前[まえ]", "お 前[めえ]"]) == "お 前[まえ, めえ]" + assert mingle_readings([" 言[い]い 分[ぶん]", " 言い分[いーぶん]"]) == " 言[い]い 分[ぶん]" print("Passed.") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/profiles.py b/helpers/profiles.py index def608d..bcf8f32 100644 --- a/helpers/profiles.py +++ b/helpers/profiles.py @@ -40,8 +40,7 @@ def all_comma_separated_names(cls) -> str: class AnkiNoteProtocol(typing.Protocol): - def __contains__(self, key: str) -> bool: - ... + def __contains__(self, key: str) -> bool: ... @dataclasses.dataclass(frozen=True) diff --git a/helpers/sakura_client.py b/helpers/sakura_client.py index ced8d85..50af7cb 100644 --- a/helpers/sakura_client.py +++ b/helpers/sakura_client.py @@ -78,7 +78,7 @@ def fetch_def(self, headword: str, *, dict_name: DictName = None, search_type: S def _parse_result(self, html_page: str) -> str: soup = BeautifulSoup(html_page, "html.parser") - for node in soup.find_all('div', class_="content"): + for node in soup.find_all("div", class_="content"): del node["class"] self._trim_node(node) yield str(node).strip().replace("\n", DEF_SEP) @@ -110,5 +110,5 @@ def main(): print(client.fetch_def("故郷")) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/sqlite3_buddy.py b/helpers/sqlite3_buddy.py index 5dee4d8..c2fc762 100644 --- a/helpers/sqlite3_buddy.py +++ b/helpers/sqlite3_buddy.py @@ -28,6 +28,7 @@ class BoundFile(NamedTuple): """ Represents an sqlite query result. """ + headword: str file_name: str source_name: str @@ -38,7 +39,8 @@ def build_or_clause(repeated_field_name: str, count: int) -> str: class Sqlite3Buddy: - """ Db holds three tables: ('meta', 'headwords', 'files') """ + """Db holds three tables: ('meta', 'headwords', 'files')""" + _db_path = os.path.join(user_files_dir(), CURRENT_DB.name) def __init__(self): @@ -89,7 +91,7 @@ def get_media_dir_abs(self, source_name: str) -> Optional[str]: def get_media_dir_rel(self, source_name: str) -> str: cur = self._con.cursor() query = """ SELECT media_dir FROM meta WHERE source_name = ? LIMIT 1; """ - return ''.join(cur.execute(query, (source_name,)).fetchone()) + return "".join(cur.execute(query, (source_name,)).fetchone()) def get_original_url(self, source_name: str) -> Optional[str]: cur = self._con.cursor() @@ -105,7 +107,7 @@ def set_original_url(self, source_name: str, new_url: str) -> None: self._con.commit() def is_source_cached(self, source_name: str) -> bool: - """ True if audio source with this name has been cached already. 
""" + """True if audio source with this name has been cached already.""" cur = self._con.cursor() queries = ( """ SELECT 1 FROM meta WHERE source_name = ? LIMIT 1; """, @@ -129,13 +131,13 @@ def insert_data(self, source_name: str, data: SourceIndex): query, ( source_name, - data['meta']['name'], - data['meta']['year'], - data['meta']['version'], + data["meta"]["name"], + data["meta"]["year"], + data["meta"]["version"], None, - data['meta']['media_dir'], - data['meta'].get('media_dir_abs'), # Possibly unset - ) + data["meta"]["media_dir"], + data["meta"].get("media_dir_abs"), # Possibly unset + ), ) # Insert headwords and file names query = """ @@ -147,9 +149,9 @@ def insert_data(self, source_name: str, data: SourceIndex): query, ( (source_name, headword, file_name) - for headword, file_list in data['headwords'].items() + for headword, file_list in data["headwords"].items() for file_name in file_list - ) + ), ) # Insert readings and accent info. query = """ @@ -163,12 +165,12 @@ def insert_data(self, source_name: str, data: SourceIndex): ( source_name, file_name, - file_info['kana_reading'], - file_info.get('pitch_pattern'), - file_info.get('pitch_number'), + file_info["kana_reading"], + file_info.get("pitch_pattern"), + file_info.get("pitch_number"), ) - for file_name, file_info in data['files'].items() - ) + for file_name, file_info in data["files"].items() + ), ) self._con.commit() @@ -178,7 +180,8 @@ def _prepare_tables(self): # and it can be arbitrary (e.g. NHK-2016). # `dictionary_name` is the name given to the audio source by its creator. # E.g. the NHK audio source provided by Ajatt-Tools has `dictionary_name` set to "NHK日本語発音アクセント新辞典". - cur.execute(""" + cur.execute( + """ CREATE TABLE IF NOT EXISTS meta( source_name TEXT primary key not null, dictionary_name TEXT not null, @@ -188,15 +191,19 @@ def _prepare_tables(self): media_dir TEXT not null, media_dir_abs TEXT ); - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE TABLE IF NOT EXISTS headwords( source_name TEXT not null, headword TEXT not null, file_name TEXT not null ); - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE TABLE IF NOT EXISTS files( source_name TEXT not null, file_name TEXT not null, @@ -204,16 +211,23 @@ def _prepare_tables(self): pitch_pattern TEXT, pitch_number TEXT ); - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE INDEX IF NOT EXISTS index_names ON meta(source_name); - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE INDEX IF NOT EXISTS index_file_names ON headwords(source_name, headword); - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE INDEX IF NOT EXISTS index_file_info ON files(source_name, file_name); - """) + """ + ) self._con.commit() cur.close() @@ -322,5 +336,5 @@ def main(): print(f"file count: {s.distinct_file_count(source_names)}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/sqlite_schema.py b/helpers/sqlite_schema.py index d741395..d7fd1de 100644 --- a/helpers/sqlite_schema.py +++ b/helpers/sqlite_schema.py @@ -18,6 +18,7 @@ class DbFileSchema(NamedTuple): the add-on opens a different sqlite3 file, thus avoiding errors that will otherwise occur due to mismatching tables, columns, etc. 
""" + prefix: str ver: str ext: str @@ -28,9 +29,9 @@ def name(self) -> str: def remove_deprecated_files(self) -> None: for file in os.scandir(user_files_dir()): - if file.name.startswith(self.prefix) and file.name.endswith(f'.{self.ext}'): + if file.name.startswith(self.prefix) and file.name.endswith(f".{self.ext}"): try: - schema = DbFileSchema(*file.name.split('.')) + schema = DbFileSchema(*file.name.split(".")) except (ValueError, TypeError): os.remove(file) print(f"Removed invalid database file: {file.path}") @@ -51,5 +52,5 @@ def main(): CURRENT_DB.remove_deprecated_files() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/tokens.py b/helpers/tokens.py index dae31cb..a4ba4c4 100644 --- a/helpers/tokens.py +++ b/helpers/tokens.py @@ -35,17 +35,17 @@ class ParseableToken(Token): def split_separators(expr: str) -> list[str]: - """ Split text by common separators (like / or ・) into separate words that can be looked up. """ + """Split text by common separators (like / or ・) into separate words that can be looked up.""" # Replace all typical separators with a space - expr = re.sub(NON_JP_REGEX, ' ', expr) # Remove non-Japanese characters - expr = re.sub(JP_SEP_REGEX, ' ', expr) # Remove Japanese punctuation - return expr.split(' ') + expr = re.sub(NON_JP_REGEX, " ", expr) # Remove non-Japanese characters + expr = re.sub(JP_SEP_REGEX, " ", expr) # Remove Japanese punctuation + return expr.split(" ") def clean_furigana(expr: str) -> str: """Remove text in [] used to represent furigana.""" - return re.sub(r' *([^ \[\]]+)\[[^\[\]]+]', r'\g<1>', expr, flags=RE_FLAGS) + return re.sub(r" *([^ \[\]]+)\[[^\[\]]+]", r"\g<1>", expr, flags=RE_FLAGS) def mark_non_jp_token(m: re.Match) -> str: @@ -61,7 +61,7 @@ def parts(expr: str, pattern: re.Pattern): def split_counters(text: str) -> Iterable[ParseableToken]: - """ Preemptively split text by words that mecab doesn't know how to parse. """ + """Preemptively split text by words that mecab doesn't know how to parse.""" for part in RE_COUNTERS.split(text): if part: yield ParseableToken(part) @@ -69,11 +69,11 @@ def split_counters(text: str) -> Iterable[ParseableToken]: def _tokenize(expr: str, *, split_regexes: Sequence[re.Pattern]) -> Iterable[Token]: if not split_regexes: - yield from split_counters(expr.replace(' ', '')) + yield from split_counters(expr.replace(" ", "")) else: for part in parts(expr, split_regexes[0]): if part: - if m := re.fullmatch(r'(.*?)', part, flags=RE_FLAGS): + if m := re.fullmatch(r"(.*?)", part, flags=RE_FLAGS): yield Token(m.group(1)) else: yield from _tokenize(part, split_regexes=split_regexes[1:]) @@ -100,7 +100,7 @@ def main(): print(clean_furigana("富竹[とみたけ]さん 今[いま] 扉[とびら]の 南京錠[なんきんじょう]いじってませんでした?")) expr = ( - "
Lorem ipsum dolor sit amet, [sound:はな.mp3] " + '
Lorem ipsum dolor sit amet, [sound:はな.mp3] ' "consectetur adipiscing
elit 私達は昨日ロンドンに着いた。おはよう。 Тест.
" "1月8日八日.彼女は12月のある寒い夜に亡くなった。" " 情報処理[じょうほうしょり]の 技術[ぎじゅつ]は 日々[にちにち,ひび] 進化[しんか]している。" @@ -109,5 +109,5 @@ def main(): print(f"{token.__class__.__name__}({token})") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/helpers/unique_files.py b/helpers/unique_files.py index 6147eec..4bf5f0e 100644 --- a/helpers/unique_files.py +++ b/helpers/unique_files.py @@ -59,5 +59,5 @@ def main(): print(file) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pitch_accents/acc_dict_mgr.py b/pitch_accents/acc_dict_mgr.py index 62525e7..8c51dde 100644 --- a/pitch_accents/acc_dict_mgr.py +++ b/pitch_accents/acc_dict_mgr.py @@ -29,7 +29,7 @@ def read_formatted_accents() -> AccentDict: """ acc_dict: AccentDict = collections.defaultdict(list) with open(FORMATTED_ACCENTS_TSV, newline="", encoding="utf-8") as f: - reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE) + reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE) for word, kana, *pitch_data in reader: entry = FormattedEntry(kana, *pitch_data) for key in (word, kana): @@ -51,12 +51,12 @@ def accents_dict_init() -> AccentDict: if should_regenerate(FORMATTED_ACCENTS_PICKLE): print("The pickle needs updating.") acc_dict = read_formatted_accents() - with open(FORMATTED_ACCENTS_PICKLE, 'wb') as f: + with open(FORMATTED_ACCENTS_PICKLE, "wb") as f: # Pickle the dictionary using the highest protocol available. pickle.dump(acc_dict, f, pickle.HIGHEST_PROTOCOL) else: print("Reading from existing accents pickle.") - with open(FORMATTED_ACCENTS_PICKLE, 'rb') as f: + with open(FORMATTED_ACCENTS_PICKLE, "rb") as f: acc_dict = pickle.load(f) # Finally, patch with user-defined entries. @@ -84,7 +84,7 @@ def lookup(self, expr: str) -> Optional[Sequence[FormattedEntry]]: return self[variant] def reload_from_disk(self): - """ Reads pitch accents file from disk. """ + """Reads pitch accents file from disk.""" print("Reading pitch accents file...") QueryOp( parent=mw, @@ -96,7 +96,7 @@ def reload_from_disk(self): ).run_in_background() def _reload_dict(self, new_dict: AccentDict): - """ Reloads accent db (e.g. when the user changed settings). """ + """Reloads accent db (e.g. 
when the user changed settings).""" print("Reloading accent dictionary...") self._db.clear() self._db = new_dict @@ -110,5 +110,5 @@ def main(): print(f"{word}\t{entry.katakana_reading}\t{entry.pitch_number}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pitch_accents/basic_types.py b/pitch_accents/basic_types.py index ff5564a..f213161 100644 --- a/pitch_accents/basic_types.py +++ b/pitch_accents/basic_types.py @@ -17,10 +17,10 @@ from mecab_controller.kana_conv import to_hiragana, kana_to_moras from mecab_controller.basic_types import MecabParsedToken -SEP_PITCH_GROUP = ' ' -SEP_PITCH_TYPES = ',' -SEP_READING_PITCH = ':' -SEP_PITCH_TYPE_NUM = '-' +SEP_PITCH_GROUP = " " +SEP_PITCH_TYPES = "," +SEP_READING_PITCH = ":" +SEP_PITCH_TYPE_NUM = "-" class PitchType(enum.Enum): @@ -50,10 +50,11 @@ def has_accent(self) -> bool: return bool(self.pitches and any(pitch.type != PitchType.unknown for pitch in self.pitches)) def describe_pitches(self) -> str: - return self.katakana_reading + SEP_READING_PITCH + SEP_PITCH_TYPES.join(dict.fromkeys( - pitch.describe() - for pitch in self.pitches - )) + return ( + self.katakana_reading + + SEP_READING_PITCH + + SEP_PITCH_TYPES.join(dict.fromkeys(pitch.describe() for pitch in self.pitches)) + ) @classmethod def from_formatted(cls, entry: FormattedEntry): @@ -94,6 +95,7 @@ class AccDbParsedToken(MecabParsedToken): """ Add pitch number to the parsed token """ + headword_accents: Sequence[PitchAccentEntry] def describe_pitches(self) -> str: @@ -140,8 +142,8 @@ def main(): assert token.describe_pitches() == "なや:heiban,atamadaka" token = AccDbParsedToken( - word='粗末', - headword='粗末', + word="粗末", + headword="粗末", katakana_reading=None, part_of_speech=PartOfSpeech.unknown, inflection_type=Inflection.dictionary_form, @@ -154,5 +156,5 @@ def main(): assert token.describe_pitches() == "ソマツ:atamadaka" -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pitch_accents/common.py b/pitch_accents/common.py index 9059e93..125de62 100644 --- a/pitch_accents/common.py +++ b/pitch_accents/common.py @@ -13,7 +13,7 @@ def is_dunder(name: str) -> bool: - """ Returns whether name is a dunder name. 
""" + """Returns whether name is a dunder name.""" return name.startswith("__") and name.endswith("__") @@ -62,7 +62,7 @@ def pitch_number_html(self): AccentDict = NewType("AccentDict", dict[str, Sequence[FormattedEntry]]) -RE_PITCH_NUM = re.compile(r'\d+|\?') +RE_PITCH_NUM = re.compile(r"\d+|\?") def split_pitch_numbers(s: str) -> list[str]: @@ -80,5 +80,5 @@ def main(): print(split_pitch_numbers("1")) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/pitch_accents/format_accents.py b/pitch_accents/format_accents.py index 2232cdd..149ad3d 100644 --- a/pitch_accents/format_accents.py +++ b/pitch_accents/format_accents.py @@ -92,10 +92,10 @@ def format_nakadaka(moras: Sequence[str], accent: int) -> str: def format_entry(moras: Sequence[str], accent: Union[int, str]) -> str: - """ Format an entry from the data in the original pitch accents file to something that uses html """ + """Format an entry from the data in the original pitch accents file to something that uses html""" if type(accent) != int: - return literal_pronunciation(''.join(moras)) + return literal_pronunciation("".join(moras)) elif len(moras) == 1: return format_one_mora_word(moras, is_flat=(accent == 0)) elif accent == 0: diff --git a/pitch_accents/styles.py b/pitch_accents/styles.py index db2700c..a474a86 100644 --- a/pitch_accents/styles.py +++ b/pitch_accents/styles.py @@ -14,102 +14,98 @@ class PitchPatternStyle(enum.Enum): class XmlTags: # low accent, underline ___ - low_start = '' - low_end = '' + low_start = "" + low_end = "" # low accent, rising _/ - low_rise_start = '' - low_rise_end = '' + low_rise_start = "" + low_rise_end = "" # high accent, overline ‾‾‾ - high_start = '' - high_end = '' + high_start = "" + high_end = "" # high accent, going down ‾‾‾\ - high_drop_start = '' - high_drop_end = '' # ꜜ (ꜜ) + high_drop_start = "" + high_drop_end = "" # ꜜ (ꜜ) # NHK data only: - nasal_start = '' # Red color ° (°) - nasal_end = '' - devoiced_start = '' - devoiced_end = '' + nasal_start = "" # Red color ° (°) + nasal_end = "" + devoiced_start = "" + devoiced_end = "" STYLE_MAP = { PitchPatternStyle.javdejong: { # Style used in the original Japanese Pitch Accent Anki add-on. # Low accents aren't marked, high accents are marked with an overline. - # low - XmlTags.low_start: '', - XmlTags.low_end: '', + XmlTags.low_start: "", + XmlTags.low_end: "", # low, rise at the end - XmlTags.low_rise_start: '', - XmlTags.low_rise_end: '', + XmlTags.low_rise_start: "", + XmlTags.low_rise_end: "", # high XmlTags.high_start: '', - XmlTags.high_end: '', + XmlTags.high_end: "", # high, drop at the end XmlTags.high_drop_start: '', - XmlTags.high_drop_end: 'ꜜ', # down arrow at the end + XmlTags.high_drop_end: "ꜜ", # down arrow at the end # nasal, e.g. カ゚ XmlTags.nasal_start: '', - XmlTags.nasal_end: '', + XmlTags.nasal_end: "", # devoiced XmlTags.devoiced_start: '', - XmlTags.devoiced_end: '', + XmlTags.devoiced_end: "", }, PitchPatternStyle.u_biq: { # Style used on the u-biq website, https://accent.u-biq.org/ - # low XmlTags.low_start: '', - XmlTags.low_end: '', + XmlTags.low_end: "", # low, rise at the end XmlTags.low_rise_start: '', - XmlTags.low_rise_end: '', + XmlTags.low_rise_end: "", # high XmlTags.high_start: '', - XmlTags.high_end: '', + XmlTags.high_end: "", # high, drop at the end XmlTags.high_drop_start: '', - XmlTags.high_drop_end: '', + XmlTags.high_drop_end: "", # nasal, e.g. 
カ゚ XmlTags.nasal_start: '', - XmlTags.nasal_end: '', + XmlTags.nasal_end: "", # devoiced XmlTags.devoiced_start: '', - XmlTags.devoiced_end: '', + XmlTags.devoiced_end: "", }, PitchPatternStyle.none: { # Use class names. # The user can configure their own styles in the Styling section of the card type. - # low XmlTags.low_start: '', - XmlTags.low_end: '', + XmlTags.low_end: "", # low, rise at the end XmlTags.low_rise_start: '', - XmlTags.low_rise_end: '', + XmlTags.low_rise_end: "", # high XmlTags.high_start: '', - XmlTags.high_end: '', + XmlTags.high_end: "", # high, drop at the end XmlTags.high_drop_start: '', - XmlTags.high_drop_end: '', + XmlTags.high_drop_end: "", # nasal, e.g. カ゚ XmlTags.nasal_start: '', - XmlTags.nasal_end: '', + XmlTags.nasal_end: "", # devoiced XmlTags.devoiced_start: '', - XmlTags.devoiced_end: '', + XmlTags.devoiced_end: "", }, PitchPatternStyle.kanjium: { # Style which is part of the kanjium project https://github.com/mifunetoshiro/kanjium - # low - XmlTags.low_start: '', - XmlTags.low_end: '', + XmlTags.low_start: "", + XmlTags.low_end: "", # low, rise at the end - XmlTags.low_rise_start: '', - XmlTags.low_rise_end: '', + XmlTags.low_rise_start: "", + XmlTags.low_rise_end: "", # high XmlTags.high_start: '', XmlTags.high_end: '', @@ -118,10 +114,9 @@ class XmlTags: XmlTags.high_drop_end: "", # nasal, e.g. カ゚ XmlTags.nasal_start: '', - XmlTags.nasal_end: '', + XmlTags.nasal_end: "", # devoiced XmlTags.devoiced_start: '', - XmlTags.devoiced_end: '', - } + XmlTags.devoiced_end: "", + }, } - diff --git a/pitch_accents/user_accents.py b/pitch_accents/user_accents.py index 724167c..b4fde5c 100644 --- a/pitch_accents/user_accents.py +++ b/pitch_accents/user_accents.py @@ -25,7 +25,8 @@ def search_pitch_accent_numbers(accents: str) -> Iterable[Union[str, int]]: class AccentEntry(NamedTuple): - """ Represents a parsed entry in the user TSV file. """ + """Represents a parsed entry in the user TSV file.""" + headword: str moras: tuple[str, ...] accents: tuple[Union[str, int], ...] @@ -35,7 +36,7 @@ def has_accent(self): @classmethod def from_csv_line(cls, line: str): - headword, reading, accents = line.split('\t') + headword, reading, accents = line.split("\t") return cls( headword=headword, moras=tuple(kana_to_moras(to_katakana(reading or headword))), @@ -46,7 +47,7 @@ def from_csv_line(cls, line: str): def create_formatted(entry: AccentEntry) -> Collection[FormattedEntry]: return dict.fromkeys( FormattedEntry( - katakana_reading=''.join(entry.moras), + katakana_reading="".join(entry.moras), html_notation=format_entry(entry.moras, pitch_num), pitch_number=str(pitch_num), ) @@ -72,7 +73,7 @@ def read_entries(self) -> Iterable[AccentEntry]: yield AccentEntry.from_csv_line(line) def create_formatted(self) -> AccentDict: - """ Build the derived pitch accents file from the original pitch accents file and save it as *.csv """ + """Build the derived pitch accents file from the original pitch accents file and save it as *.csv""" temp_dict: AccentDict = collections.defaultdict(dict) for entry in self.read_entries(): temp_dict[entry.headword].update(create_formatted(entry)) @@ -92,5 +93,5 @@ def main(): print(f"{key=}; {value=}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/reading.py b/reading.py index 73295ac..f23b022 100644 --- a/reading.py +++ b/reading.py @@ -29,7 +29,7 @@ def convert_to_inline_style(txt: str) -> str: - """ Map style classes to their user-configured inline versions. 
""" + """Map style classes to their user-configured inline versions.""" for k, v in STYLE_MAP[cfg.pitch_accent.style].items(): txt = txt.replace(k, v) return txt @@ -55,11 +55,11 @@ def should_ignore_incorrect_reading(expr_reading: str) -> bool: https://tatsumoto-ren.github.io/blog/discussing-various-card-templates.html#distinguishing-readings """ return ( - expr_reading.isnumeric() - or cfg.furigana.reading_separator.strip() in expr_reading - or MULTIPLE_READING_SEP in expr_reading - or expr_reading.startswith('x') - or expr_reading.startswith('×') + expr_reading.isnumeric() + or cfg.furigana.reading_separator.strip() in expr_reading + or MULTIPLE_READING_SEP in expr_reading + or expr_reading.startswith("x") + or expr_reading.startswith("×") ) @@ -74,7 +74,7 @@ def split_possible_furigana(expr: str) -> WordReading: # If there are numbers or multiple readings present, ignore all of them. if expr_reading and should_ignore_incorrect_reading(expr_reading): - expr_reading = '' + expr_reading = "" return WordReading(expr, expr_reading) @@ -124,7 +124,7 @@ def get_pronunciations(expr: str, sanitize: bool = True, recurse: bool = True, u # or if the kana reading of the full expression can be sourced from mecab, # and the user wants to perform kana lookups, then try the reading. if not ret and cfg.pitch_accent.kana_lookups: - expr_reading = (expr_reading or single_word_reading(expr)) + expr_reading = expr_reading or single_word_reading(expr) if expr_reading and (lookup_reading := acc_dict.lookup(expr_reading)): ret.setdefault(expr, []).extend(lookup_reading) @@ -166,7 +166,7 @@ def get_notation(entry: FormattedEntry, mode: PitchOutputFormat) -> str: elif mode == PitchOutputFormat.number: return entry.pitch_number elif mode == PitchOutputFormat.html_and_number: - return update_html(f'{entry.html_notation} {entry.pitch_number_html}') + return update_html(f"{entry.html_notation} {entry.pitch_number_html}") raise Exception("Unreachable.") @@ -185,12 +185,12 @@ def entries_to_html(entries: Sequence[FormattedEntry], output_format: PitchOutpu def format_pronunciations( - pronunciations: AccentDict, - output_format: PitchOutputFormat = PitchOutputFormat.html, - sep_single: str = "・", - sep_multi: str = "、", - expr_sep: str = None, - max_results: int = None, + pronunciations: AccentDict, + output_format: PitchOutputFormat = PitchOutputFormat.html, + sep_single: str = "・", + sep_multi: str = "、", + expr_sep: str = None, + max_results: int = None, ) -> str: ordered_dict = OrderedDict() for word, entries in pronunciations.items(): @@ -230,7 +230,7 @@ def format_furigana_readings(word: str, hiragana_readings: Sequence[str]) -> str def format_hiragana_readings(readings: Sequence[str]) -> str: - """ Discard kanji and format the readings as hiragana. """ + """Discard kanji and format the readings as hiragana.""" if 1 < len(readings): return f"({cfg.furigana.reading_separator.join(map(to_hiragana, readings))})" else: @@ -238,12 +238,12 @@ def format_hiragana_readings(readings: Sequence[str]) -> str: def discard_extra_readings( - readings: Sequence[str], - *, - max_results: int, - discard_mode: ReadingsDiscardMode + readings: Sequence[str], + *, + max_results: int, + discard_mode: ReadingsDiscardMode, ) -> Sequence[str]: - """ Depending on the settings, if there are too many readings, discard some or all but the first. 
""" + """Depending on the settings, if there are too many readings, discard some or all but the first.""" if max_results <= 0 or len(readings) <= max_results: return readings elif discard_mode == ReadingsDiscardMode.discard_extra: @@ -273,7 +273,7 @@ def try_lookup_full_text(text: str) -> Iterable[AccDbParsedToken]: part_of_speech=PartOfSpeech.unknown, inflection_type=Inflection.dictionary_form, katakana_reading=None, - headword_accents=[PitchAccentEntry.from_formatted(entry) for entry in entries] + headword_accents=[PitchAccentEntry.from_formatted(entry) for entry in entries], ) @@ -306,7 +306,7 @@ def all_hiragana_readings(token: AccDbParsedToken) -> Iterable[str]: yield adjust_to_inflection( raw_word=token.word, headword=token.headword, - headword_reading=to_hiragana(entry.katakana_reading) + headword_reading=to_hiragana(entry.katakana_reading), ) @@ -335,13 +335,13 @@ def format_acc_db_result(out: AccDbParsedToken, full_hiragana: bool = False) -> def append_accents(token: MecabParsedToken) -> AccDbParsedToken: return AccDbParsedToken( **dataclasses.asdict(token), - headword_accents=[PitchAccentEntry.from_formatted(entry) for entry in iter_accents(token.headword)] + headword_accents=[PitchAccentEntry.from_formatted(entry) for entry in iter_accents(token.headword)], ) def format_parsed_tokens( - tokens: Sequence[Union[AccDbParsedToken, Token]], - full_hiragana: bool = False + tokens: Sequence[Union[AccDbParsedToken, Token]], + full_hiragana: bool = False, ) -> Iterable[str]: for token in tokens: if isinstance(token, AccDbParsedToken): @@ -372,7 +372,7 @@ def generate_furigana(src_text: str, split_morphemes: bool = True, full_hiragana else: # Add the string as is, without furigana. substrings.append(token) - return ''.join(format_parsed_tokens(substrings, full_hiragana)).strip() + return "".join(format_parsed_tokens(substrings, full_hiragana)).strip() # Entry point diff --git a/tasks.py b/tasks.py index 85ca5dd..68279fb 100644 --- a/tasks.py +++ b/tasks.py @@ -127,12 +127,12 @@ def html_to_media_line(txt: str) -> str: class DoTasks: def __init__( - self, - note: Note, - *, - caller: TaskCaller, - src_field: Optional[str] = None, - overwrite: bool = False, + self, + note: Note, + *, + caller: TaskCaller, + src_field: Optional[str] = None, + overwrite: bool = False, ): self._note = note self._caller = caller diff --git a/widgets/anki_style.py b/widgets/anki_style.py index 30e0201..38ee153 100644 --- a/widgets/anki_style.py +++ b/widgets/anki_style.py @@ -6,13 +6,15 @@ def fix_default_anki_style(self: QTableWidget): from aqt import mw + try: from aqt.theme import WidgetStyle except ImportError: # Running an old version of Anki. No action is necessary. 
         return
 
     if mw.pm.get_widget_style() == WidgetStyle.ANKI:
-        self.setStyleSheet("""
+        self.setStyleSheet(
+            """
         QTableWidget,
         QTableView,
         QLineEdit,
@@ -26,4 +28,5 @@ def fix_default_anki_style(self: QTableWidget):
             border-radius: 0px;
             padding: 0px;
         }
-        """)
+        """
+        )
diff --git a/widgets/audio_sources.py b/widgets/audio_sources.py
index f88881d..52ed79d 100644
--- a/widgets/audio_sources.py
+++ b/widgets/audio_sources.py
@@ -20,11 +20,13 @@
 class SourceEnableCheckbox(QCheckBox):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.setStyleSheet("""
+        self.setStyleSheet(
+            """
         QCheckBox {
             margin: 0 auto;
         }
-        """)
+        """
+        )
 
 
 def tooltip_cache_remove_complete(removed: list[AudioSourceConfig]):
@@ -45,8 +47,7 @@ def tooltip_cache_remove_complete(removed: list[AudioSourceConfig]):
 
 
 class AudioManagerInterface(typing.Protocol):
-    def request_new_session(self):
-        ...
+    def request_new_session(self): ...
 
 
 class AudioSourcesTable(ExpandingTableWidget):
@@ -138,7 +139,7 @@ def iterateConfigs(self) -> Iterable[AudioSourceConfig]:
             if all(row) and (row := pack_back(row)).is_valid:
                 row.name = normalize_filename(row.name)
                 while row.name in sources:
-                    row.name += '(new)'
+                    row.name += "(new)"
                 sources[row.name] = row
         return sources.values()
 
@@ -156,7 +157,7 @@ def populate(self, sources: Iterable[AudioSourceConfig]):
 
     def fillCellContent(self, row_n: int, col_n: int, content: str):
         if isinstance(cell := self.getCellContent(row_n, col_n), QCheckBox):
-            return cell.setChecked(any(value in content.lower() for value in ('true', 'yes', 'y')))
+            return cell.setChecked(any(value in content.lower() for value in ("true", "yes", "y")))
         return super().fillCellContent(row_n, col_n, content)
@@ -204,5 +205,5 @@ def main():
     sys.exit()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/widgets/audio_sources_stats.py b/widgets/audio_sources_stats.py
index 66915b2..91c038b 100644
--- a/widgets/audio_sources_stats.py
+++ b/widgets/audio_sources_stats.py
@@ -61,7 +61,7 @@ def get_mock_stats() -> TotalAudioStats:
             AudioStats("tick", 5, 6),
             AudioStats("tack", 7, 7),
             AudioStats("toe", 10, 9),
-        ]
+        ],
     )
 
 
@@ -73,5 +73,5 @@ def main():
     app.exec()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/widgets/enum_selector.py b/widgets/enum_selector.py
index c6ac95b..780f1c9 100644
--- a/widgets/enum_selector.py
+++ b/widgets/enum_selector.py
@@ -10,11 +10,11 @@
 
 class EnumSelectCombo(QComboBox):
     def __init__(
-            self,
-            enum_type: enum.EnumMeta,
-            initial_value: Union[enum.Enum, str] = None,
-            show_values: bool = False,
-            parent=None,
+        self,
+        enum_type: enum.EnumMeta,
+        initial_value: Union[enum.Enum, str] = None,
+        show_values: bool = False,
+        parent=None,
     ):
         super().__init__(parent)
         for item in enum_type:
diff --git a/widgets/pitch_override_table.py b/widgets/pitch_override_table.py
index e136045..c1753d1 100644
--- a/widgets/pitch_override_table.py
+++ b/widgets/pitch_override_table.py
@@ -20,7 +20,7 @@
 
 
 def is_comma_separated_list_of_numbers(text: str):
-    return bool(re.fullmatch(r'[0-9,]+', text))
+    return bool(re.fullmatch(r"[0-9,]+", text))
 
 
 def is_allowed_accent_notation(text: str):
@@ -36,7 +36,7 @@ class PitchAccentTableRow(NamedTuple):
 class PitchOverrideTable(ExpandingTableWidget):
     _columns = tuple(ui_translate(s) for s in PitchAccentTableRow._fields)
     _sep_regex = re.compile(r"[ \r\t\n.;。、;・]+", flags=re.IGNORECASE | re.MULTILINE)
-    _column_sep = '\t'
+    _column_sep = "\t"
 
     @classmethod
     def from_tsv(cls, file_path: str, *args):
@@ -45,14 +45,14 @@ def from_tsv(cls, file_path: str, *args):
 
     def read_tsv_file(self, file_path: str) -> Collection[PitchAccentTableRow]:
         table_rows = {}
         if os.path.isfile(file_path):
-            with open(file_path, encoding='utf8') as f:
+            with open(file_path, encoding="utf8") as f:
                 try:
                     table_rows.update(dict.fromkeys(
                         PitchAccentTableRow(*line.strip().split(self._column_sep)) for line in f
                     ))
                 except TypeError as ex:
-                    error = str(ex).replace('.__new__()', '')
+                    error = str(ex).replace(".__new__()", "")
                     showInfo(f"The file is formatted incorrectly. {error}.", type="warning", parent=self)
         return table_rows.keys()
 
@@ -81,8 +81,8 @@ def as_tsv(self) -> list[str]:
 
     def dump(self, file_path: str):
         try:
-            with open(file_path, 'w', encoding='utf8') as of:
-                of.write('\n'.join(self.as_tsv()))
+            with open(file_path, "w", encoding="utf8") as of:
+                of.write("\n".join(self.as_tsv()))
         except OSError as ex:
             showInfo(f"{ex.__class__.__name__}: this file can't be written.", type="warning", parent=self)
@@ -104,12 +104,12 @@ def initUI(self):
         layout.addWidget(self.table)
 
         # example rows
-        self.table.addRow(['咖哩', 'かれー', '0'])
-        self.table.addRow(['敷礼', 'しきれい', '0'])
-        self.table.addRow(['器量良し', 'きりょうよし', '2'])
-        self.table.addRow(['隅に置けない', 'すみにおけない', '1'])
-        self.table.addRow(['尾骶骨', 'びていこつ', '2'])
-        self.table.addRow(['管水母', 'くだくらげ', '3'])
+        self.table.addRow(["咖哩", "かれー", "0"])
+        self.table.addRow(["敷礼", "しきれい", "0"])
+        self.table.addRow(["器量良し", "きりょうよし", "2"])
+        self.table.addRow(["隅に置けない", "すみにおけない", "1"])
+        self.table.addRow(["尾骶骨", "びていこつ", "2"])
+        self.table.addRow(["管水母", "くだくらげ", "3"])
 
 
 def main():
@@ -119,5 +119,5 @@ def main():
     sys.exit(app.exec())
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/widgets/pitch_override_widget.py b/widgets/pitch_override_widget.py
index 32c3076..5552895 100644
--- a/widgets/pitch_override_widget.py
+++ b/widgets/pitch_override_widget.py
@@ -36,26 +36,26 @@ def write_tsv_file():
             name, mime = QFileDialog.getSaveFileName(
                 parent=cast(QWidget, self),
                 caption="Save override table as TSV File",
-                directory=cfg['last_file_save_location'],
+                directory=cfg["last_file_save_location"],
                 filter=self._filename_filter,
             )
             if not name:
                 return tooltip("Aborted.")
             self._table.dump(name)
-            cfg['last_file_save_location'] = name  # may or may not be lost
+            cfg["last_file_save_location"] = name  # may or may not be lost
 
         def read_tsv_file():
             # noinspection PyArgumentList
             name, mime = QFileDialog.getOpenFileName(
                 parent=cast(QWidget, self),
-                caption='Load override table from TSV File',
-                directory=cfg['last_file_save_location'],
+                caption="Load override table from TSV File",
+                directory=cfg["last_file_save_location"],
                 filter=self._filename_filter,
             )
             if not name:
                 return tooltip("Aborted.")
             self._table.update_from_tsv(name, reset_table=False)
-            cfg['last_file_save_location'] = name  # may or may not be lost
+            cfg["last_file_save_location"] = name  # may or may not be lost
 
         qconnect(self._import_button.clicked, read_tsv_file)
         qconnect(self._export_button.clicked, write_tsv_file)
diff --git a/widgets/table.py b/widgets/table.py
index 711c67a..3201053 100644
--- a/widgets/table.py
+++ b/widgets/table.py
@@ -123,7 +123,7 @@ def insertCellContent(self, row_n: int, col_n: int, content: Union[str, QWidget]
             raise ValueError("Invalid parameter passed.")
 
     def addEmptyLastRow(self):
-        return self.addRow(cells=('' for _column in self._columns), index=self.rowCount())
+        return self.addRow(cells=("" for _column in self._columns), index=self.rowCount())
 
     def getCellContent(self, row_n: int, col_n: int) -> Optional[CellContent]:
         """
@@ -148,10 +148,10 @@ def fillCurrentRowFromClipBoard(self):
         """
 
         def text_parts():
-            return filter(bool, map(str.strip, re.split(self._sep_regex, QApplication.clipboard().text(), )), )
+            return filter(bool, map(str.strip, re.split(self._sep_regex, QApplication.clipboard().text())))
 
         def column_iter():
-            return range(self.currentColumn(), self.columnCount(), )
+            return range(self.currentColumn(), self.columnCount())
 
         for col_n, text in zip(column_iter(), text_parts()):
             self.fillCellContent(self.currentRow(), col_n, text)