diff --git a/.cookiecutter.json b/.cookiecutter.json index e80c2a1..202857a 100644 --- a/.cookiecutter.json +++ b/.cookiecutter.json @@ -2,11 +2,12 @@ "_copy_without_render": [ "*.github" ], - "_template": "https://github.com/MartinBernstorff/swift-python-cookiecutter", + "_template": "https://github.com/MartinBernstorff/nimble-python-cookiecutter", "author": "Martin Bernstorff", "copyright_year": "2023", - "email": "martinbernstorfff@gmail.com", + "email": "martinbernstorff@gmail.com", "friendly_name": "Personal Mnemonic Medium", + "github_repo": "personal-mnemonic-medium", "github_user": "MartinBernstorff", "license": "MIT", "package_name": "personal_mnemonic_medium", diff --git a/.cruft.json b/.cruft.json index 3f72bab..d6b1af7 100644 --- a/.cruft.json +++ b/.cruft.json @@ -1,6 +1,6 @@ { - "template": "https://github.com/MartinBernstorff/swift-python-cookiecutter", - "commit": "525b1f682621953d7ef9deb6ad6cf7359ebd43c8", + "template": "https://github.com/MartinBernstorff/nimble-python-cookiecutter", + "commit": "80b90f37d8ce87ffb1ab97cf2b518fc0fd431dfb", "checkout": null, "context": { "cookiecutter": { @@ -8,15 +8,16 @@ "package_name": "personal_mnemonic_medium", "friendly_name": "Personal Mnemonic Medium", "author": "Martin Bernstorff", - "email": "martinbernstorfff@gmail.com", + "email": "martinbernstorff@gmail.com", "github_user": "MartinBernstorff", + "github_repo": "personal-mnemonic-medium", "version": "0.0.0", "copyright_year": "2023", "license": "MIT", "_copy_without_render": [ "*.github" ], - "_template": "https://github.com/MartinBernstorff/swift-python-cookiecutter" + "_template": "https://github.com/MartinBernstorff/nimble-python-cookiecutter" } }, "directory": null diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 841db02..ef8fca2 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,37 +1,39 @@ // For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile { - "name": "Existing Dockerfile", - "build": { - // Sets the run context to one level up instead of the .devcontainer folder. - "context": "..", - // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. - "dockerfile": "../Dockerfile" - }, - "customizations": { - "vscode": { - "extensions": [ - "ms-python.python", - "charliermarsh.ruff", - "ms-python.black-formatter", - "ms-azuretools.vscode-docker", - "ms-vscode.makefile-tools", - "github.vscode-github-actions" - ] - } - }, - "features": { - "ghcr.io/devcontainers/features/github-cli:1": {} - }, - "postStartCommand": "pip install -e ." - // Features to add to the dev container. More info: https://containers.dev/features. - // "features": {}, - // Use 'forwardPorts' to make a list of ports inside the container available locally. - // "forwardPorts": [], - // Uncomment the next line to run commands after the container is created. - // "postCreateCommand": "cat /etc/os-release", - // Configure tool-specific properties. - // "customizations": {}, - // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. - // "remoteUser": "devcontainer" + "name": "Existing Dockerfile", + "build": { + // Sets the run context to one level up instead of the .devcontainer folder. + "context": "..", + // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 
+ "dockerfile": "../Dockerfile", + "cacheFrom": "ghcr.io/martinbernstorff/personal-mnemonic-medium-devcontainer:latest" + }, + // "features": {}, + "customizations": { + "vscode": { + "extensions": [ + "GitHub.copilot", + "charliermarsh.ruff", + "ms-python.python", + "ms-python.vscode-pylance", + "GitHub.vscode-pull-request-github", + "ms-vscode.makefile-tools", + "github.vscode-github-actions", + ] + } + }, + "features": { + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + "postStartCommand": "make install" + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + // Uncomment the next line to run commands after the container is created. + // Configure tool-specific properties. + // "customizations": {}, + // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "devcontainer" } \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 56f513c..0000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,8 +0,0 @@ -- [ ] I have considered whether this PR needs review, and requested a review if necessary. - -Fixes issue # - -# Notes for reviewers -Reviewers can skip X, but should pay attention to Y. - - \ No newline at end of file diff --git a/.github/workflows/dependabot_automerge.yml b/.github/workflows/dependabot_automerge.yml deleted file mode 100644 index 22d2ecd..0000000 --- a/.github/workflows/dependabot_automerge.yml +++ /dev/null @@ -1,30 +0,0 @@ -# GitHub action to automerge dependabot PRs. Only merges if tests passes the -# branch protections in the repository settings. -# You can set branch protections in the repository under Settings > Branches > Add rule -name: automerge-bot-prs - -on: pull_request - -permissions: - contents: write - pull-requests: write - -jobs: - dependabot-automerge: - runs-on: ubuntu-latest - # if actor is dependabot or pre-commit-ci[bot] then run - if: ${{ github.actor == 'dependabot[bot]' }} - - steps: - # Checkout action is required for token to persist - - name: Enable auto-merge for Dependabot PRs - run: gh pr merge --auto --merge "$PR_URL" # Use Github CLI to merge automatically the PR - env: - PR_URL: ${{github.event.pull_request.html_url}} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Auto approve dependabot PRs - if: ${{ github.actor == 'dependabot[bot]' }} - uses: hmarr/auto-approve-action@v3.1.0 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/validate.yml b/.github/workflows/tests.yml similarity index 54% rename from .github/workflows/validate.yml rename to .github/workflows/tests.yml index 8064d43..ac59250 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/tests.yml @@ -1,15 +1,17 @@ -# GitHub action to check if pre-commit has been run. Runs from .pre-commit-config.yaml, where the pre-commit actions are. - -name: validate +# This workflow will install Python dependencies, run pytests and run notebooks +# then it will in python 3.9 (ubuntu-latest) create a badge with the coverage +# and add it to the PR. This badge will be updated if the PR is updated. 
+name: Tests on: - pull_request: - branches: [main] push: branches: [main] + pull_request: + branches: [main] jobs: - build: + build-and-test: + permissions: write-all concurrency: group: "${{ github.workflow }} @ ${{ github.ref }}" cancel-in-progress: true @@ -22,14 +24,15 @@ jobs: uses: docker/login-action@v2 with: registry: ghcr.io - username: ${{ github.repository_owner }} + username: MartinBernstorff password: ${{ secrets.GITHUB_TOKEN }} - name: Pre-build dev container image uses: devcontainers/ci@v0.3 with: - imageName: ghcr.io/martinbernstorff/personal-mnemonic-medium - cacheFrom: ghcr.io/martinbernstorff/personal-mnemonic-medium - push: always + imageName: ghcr.io/martinbernstorff/personal-mnemonic-medium-devcontainer + # cacheFrom: ghcr.io/martinbernstorff/personal-mnemonic-medium-devcontainer:latest + push: filter + refFilterForPush: refs/heads/main runCmd: - make validate \ No newline at end of file + make validate diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index b759487..0000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Run main on Life Lessons", - "type": "python", - "request": "launch", - "program": "${file}", - "console": "integratedTerminal", - "args": [ - "/input/", - "Life.apkg" - ], - } - ] -} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 190db2e..2483f95 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,12 +1,19 @@ { + "python.analysis.typeCheckingMode": "strict", "python.testing.pytestArgs": [ - "tests" + "personal_mnemonic_medium" ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, - "python.analysis.typeCheckingMode": "strict", - "[python]": { - "editor.defaultFormatter": "ms-python.black-formatter" + "explorer.excludeGitIgnore": false, + "files.exclude": { + "**/.git": true, + "**/.svn": true, + "**/.hg": true, + "**/CVS": true, + "**/.DS_Store": true, + "**/Thumbs.db": true, + "**/BUILD": true }, - "python.formatting.provider": "none" + "python.analysis.diagnosticMode": "workspace" } \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 2c144c2..a01c108 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,16 +3,16 @@ FROM python:3.11-bookworm # Set the working directory to /app WORKDIR /app +RUN pip install pyright +RUN pyright . # Install deps COPY pyproject.toml ./ -RUN pip install .[dev] -RUN pip install .[tests] +RUN pip install --upgrade .[dev] +RUN pip install --upgrade .[tests] # Ensure pyright builds correctly. # If run in make validate, it is run in parallel, which breaks its installation. -RUN pyright . - # Install the entire app COPY . /app RUN pip install -e . 
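
The application/main.py diff below replaces the defaultdict-based deck grouping with functionalpy's Seq. A minimal sketch of that grouping step, using the Seq API exactly as it appears in the diff (functionalpy==0.6.0 per pyproject.toml) and illustrative dicts standing in for real AnkiCard objects:

from functionalpy import Seq

# Illustrative stand-ins for AnkiCard objects; only deck-name access matters here.
cards = [
    {"deckname": "Medicine", "front": "Q1"},
    {"deckname": "Medicine", "front": "Q2"},
    {"deckname": "Programming", "front": "Q3"},
]

# Group cards by deck name, mirroring Seq(cards).group_by(...).to_iter() in main().
grouped_cards = Seq(cards).group_by(lambda card: card["deckname"]).to_iter()

for group in grouped_cards:
    deck_cards = group.group_contents.to_list()  # the cards belonging to one deck
    print(f"{len(deck_cards)} card(s) in deck {deck_cards[0]['deckname']}")
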
diff --git a/application/main.py b/application/main.py index 5a11010..22d55ec 100644 --- a/application/main.py +++ b/application/main.py @@ -1,27 +1,31 @@ -from collections import defaultdict from pathlib import Path from time import sleep -from typing import Annotated, Any, Dict +from typing import Annotated, Any import sentry_sdk import typer +from functionalpy import Seq from personal_mnemonic_medium.card_pipeline import CardPipeline from personal_mnemonic_medium.exporters.anki.package_generator import ( AnkiPackageGenerator, ) from personal_mnemonic_medium.exporters.anki.sync import sync_deck -from personal_mnemonic_medium.note_factories.markdown import MarkdownNoteFactory +from personal_mnemonic_medium.note_factories.markdown import ( + MarkdownNoteFactory, +) from personal_mnemonic_medium.prompt_extractors.cloze_extractor import ( ClozePromptExtractor, ) -from personal_mnemonic_medium.prompt_extractors.qa_extractor import QAPromptExtractor +from personal_mnemonic_medium.prompt_extractors.qa_extractor import ( + QAPromptExtractor, +) from wasabi import Printer msg = Printer(timestamp=True) # helper for creating anki connect requests -def request(action: Any, **params: Any) -> Dict[str, Any]: +def request(action: Any, **params: Any) -> dict[str, Any]: return {"action": action, "params": params, "version": 6} @@ -30,12 +34,16 @@ def main( host_output_dir: Path, watch: Annotated[ bool, - typer.Option(help="Keep running, updating Anki deck every 15 seconds"), + typer.Option( + help="Keep running, updating Anki deck every 15 seconds" + ), ], ): """Run the thing.""" if not input_dir.exists(): - raise FileNotFoundError(f"Input directory {input_dir} does not exist") + raise FileNotFoundError( + f"Input directory {input_dir} does not exist" + ) if not host_output_dir.exists(): msg.info(f"Creating output directory {host_output_dir}") @@ -59,17 +67,17 @@ def main( ClozePromptExtractor(), ], card_exporter=AnkiPackageGenerator(), # Step 3, get the cards from the prompts - ).run( - input_path=input_dir, - ) - - decks = defaultdict(list) + ).run(input_path=input_dir) - for card in cards: - decks[card.deckname] += [card] + grouped_cards = ( + Seq(cards).group_by(lambda card: card.deckname).to_iter() + ) - for deck in decks: - deck_bundle = AnkiPackageGenerator().cards_to_deck_bundle(cards=decks[deck]) + for group in grouped_cards: + cards = group.group_contents.to_list() + deck_bundle = AnkiPackageGenerator().cards_to_deck_bundle( + cards=cards + ) sync_deck( deck_bundle=deck_bundle, sync_dir_path=host_output_dir, @@ -79,10 +87,16 @@ def main( if watch: sleep_seconds = 60 - msg.good(f"Sync complete, sleeping for {sleep_seconds} seconds") + msg.good( + f"Sync complete, sleeping for {sleep_seconds} seconds" + ) sleep(sleep_seconds) - main(input_dir=input_dir, watch=watch, host_output_dir=host_output_dir) + main( + input_dir=input_dir, + watch=watch, + host_output_dir=host_output_dir, + ) + - if __name__ == "__main__": typer.run(main) diff --git a/makefile b/makefile index a547efa..f256dbf 100644 --- a/makefile +++ b/makefile @@ -1,28 +1,41 @@ -lint: - @echo Running black - black . +SRC_PATH = src/personal_mnemonic_medium - @echo Running ruff - ruff check . --fix +install-dev: + pip install --upgrade .[dev] -test: - @echo ––– Testing ––– - pytest -n auto -rfE --failed-first --disable-warnings -q +install: + make install-dev + pip install -e . -type-check: - @echo ––– Running static type checks ––– - pyright . 
+test: ## Run tests + pytest tests -install: - pip install --upgrade -e .[dev,tests] +lint: ## Format code + ruff format . + ruff . --fix \ + --extend-select F401 \ + --extend-select F841 -validate: - @echo ––– Ensuring dependencies are up to date. This will take a few moments --- - @make install > /dev/null - @make lint && make type-check && make test +type-check: ## Type-check code + pyright $(SRC_PATH) -pr: - gh pr create -w - make validate +validate: ## Run all checks + make lint + make type-check + make test + +sync-pr: + git push --set-upstream origin HEAD git push - gh pr merge --auto --merge \ No newline at end of file + +create-pr: + gh pr create -w || true + +merge-pr: + gh pr merge --auto --merge --delete-branch + +pr: ## Run relevant tests before PR + make sync-pr + make create-pr + make validate + make merge-pr \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 66fb2f3..eb15320 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,24 +1,17 @@ -# v1 -# v2 [build-system] requires = ["setuptools>=61.0.0", "wheel", "setuptools_scm"] build-backend = "setuptools.build_meta" [project] name = "personal-mnemonic-medium" -version = "0.2.0" -authors = [ - { name = "Martin Bernstorff", email = "martinbernstorfff@gmail.com" }, -] +version = "0.0.0" +authors = [{ name = "Martin Bernstorff", email = "martinbernstorff@gmail.com" }] description = "Personal Mnemonic Medium" -classifiers = [ - "Operating System :: POSIX :: Linux", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Programming Language :: Python :: 3.10", -] -requires-python = ">=3.10" +classifiers = ["Programming Language :: Python :: 3.11"] +requires-python = ">=3.11" + dependencies = [ + "functionalpy==0.6.0", "misaka==2.1.1", "genanki==0.13.0", "typer==0.9.0", @@ -27,12 +20,19 @@ dependencies = [ "sentry-sdk==1.32.0", ] +[project.license] +file = "LICENSE" + +[project.readme] +file = "README.md" +content-type = "text/markdown" + [project.optional-dependencies] dev = [ "cruft==2.15.0", "pyright==1.1.328", "pre-commit==2.20.0", - "ruff==0.0.254", + "ruff==0.1.3", "black==22.8.0", ] tests = [ @@ -48,19 +48,20 @@ repository = "https://github.com/MartinBernstorff/personal-mnemonic-medium" documentation = "https://MartinBernstorff.github.io/personal-mnemonic-medium/" [tool.pyright] -exclude = [".*venv*", ".tox", "*.apkg"] +exclude = [".*venv*"] pythonPlatform = "Darwin" -typeCheckingMode = "basic" - +reportMissingTypeStubs = false [tool.ruff] # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default. +line-length = 70 select = [ "A", "ANN", "ARG", "B", "C4", + "C90", "COM", "D417", "E", @@ -75,7 +76,6 @@ select = [ "PLW", "PT", "UP", - "Q", "PTH", "RSE", "RET", @@ -83,7 +83,18 @@ select = [ "SIM", "W", ] -ignore = ["ANN101", "ANN401", "E402", "E501", "F401", "F841", "UP006", "RET504"] +ignore = [ + "ANN101", + "ANN401", + "E402", + "E501", + "F841", + "RET504", + "COM812", + "COM819", + "W191", +] +ignore-init-module-imports = true # Allow autofix for all enabled rules (when `--fix`) is provided. unfixable = ["ERA"] # Exclude a variety of commonly ignored directories. @@ -112,7 +123,10 @@ exclude = [ ] # Allow unused variables when underscore-prefixed. 
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" -target-version = "py39" +target-version = "py311" + +[tool.ruff.format] +skip-magic-trailing-comma = true [tool.ruff.flake8-annotations] mypy-init-return = true @@ -120,47 +134,11 @@ suppress-none-returning = true [tool.ruff.isort] known-third-party = ["wandb"] +split-on-trailing-comma = false [tool.ruff.mccabe] # Unlike Flake8, default to a complexity level of 10. max-complexity = 10 -[tool.semantic_release] -branch = "main" -version_variable = ["pyproject.toml:version"] -upload_to_pypi = false -upload_to_release = false -build_command = "python -m pip install build; python -m build" - [tool.setuptools] include-package-data = true - - -[tool.tox] -legacy_tox_ini = """ -[tox] -envlist = py{39} - -[testenv] -description: run unit tests -extras = tests -use_develop = true -commands = - pytest -n auto {posargs:test} - -[testenv:type] -description: run type checks -extras = tests, dev -basepython = py39 # Setting these explicitly avoid recreating env if your shell is set to a different version -use_develop = true -commands = - pyright . - -[testenv:docs] -description: build docs -extras = docs -basepython = py39 # Setting these explicitly avoid recreating env if your shell is set to a different version -use_develop = true -commands = - sphinx-build -b html docs docs/_build/html -""" diff --git a/readme.md b/readme.md index 85108c0..6116b5e 100644 --- a/readme.md +++ b/readme.md @@ -1,5 +1,18 @@ # Personal Mnemonic Medium -[![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/MartinBernstorff/personal-mnemonic-medium/) + +[![PyPI](https://img.shields.io/pypi/v/personal-mnemonic-medium.svg)][pypi status] +[![Python Version](https://img.shields.io/pypi/pyversions/personal-mnemonic-medium)][pypi status] +[![documentation](https://github.com/MartinBernstorff/personal-mnemonic-medium/actions/workflows/documentation.yml/badge.svg)][documentation] +[![Tests](https://github.com/MartinBernstorff/personal-mnemonic-medium/actions/workflows/tests.yml/badge.svg)][tests] +[![Black](https://img.shields.io/badge/code%20style-black-000000.svg)][black] + +[pypi status]: https://pypi.org/project/personal-mnemonic-medium/ +[documentation]: https://MartinBernstorff.github.io/personal-mnemonic-medium/ +[tests]: https://github.com/MartinBernstorff/personal-mnemonic-medium/actions?workflow=Tests +[black]: https://github.com/psf/black + + + Extracting spaced repetition prompts (flashcards) from documents. @@ -11,7 +24,10 @@ A [Zettelkasten](https://medium.com/@martinbernstorf/why-you-need-an-idea-manage This thinking is largely inspired by Andy Matuschak's [Personal Mnemonic Medium](https://notes.andymatuschak.org/The_mnemonic_medium_can_be_extended_to_one%E2%80%99s_personal_notes), and the code is based on the unmaintained [Ankdown](https://github.com/benwr/ankdown). -FYI-style open source, maintenance is not guaranteed. + + +## Installation + ## Pipeline The left path describes the abstract pipeline, the right path the current instantiation in this repo. @@ -33,10 +49,27 @@ graph TD Prompts -- AnkiPackageGenerator --> Cards ``` -## Contributing -To get a full +### Setting up a dev environment +1. Install [Orbstack](https://orbstack.dev/) or Docker Desktop. Make sure to complete the full install process before continuing. +2. If not installed, install VSCode +3. 
Press this [link](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/Aarhus-Psychiatry-Research/psycop-common) +4. Complete the setup process + +## Usage + +TODO: Add minimal usage example + +To see more examples, see the [documentation]. + +# 💬 Where to ask questions + +| Type | | +| ------------------------------ | ---------------------- | +| 🚨 **Bug Reports** | [GitHub Issue Tracker] | +| 🎁 **Feature Requests & Ideas** | [GitHub Issue Tracker] | +| 👩‍💻 **Usage Questions** | [GitHub Discussions] | +| 🗯 **General Discussion** | [GitHub Discussions] | -## Running through docker -To build and run the container, see `docker_cmd.sh`. +[github issue tracker]: https://github.com/MartinBernstorff/personal-mnemonic-medium/issues +[github discussions]: https://github.com/MartinBernstorff/personal-mnemonic-medium/discussions - diff --git a/src/personal_mnemonic_medium/card_pipeline.py b/src/personal_mnemonic_medium/card_pipeline.py index af01007..c11beca 100644 --- a/src/personal_mnemonic_medium/card_pipeline.py +++ b/src/personal_mnemonic_medium/card_pipeline.py @@ -1,12 +1,17 @@ from collections.abc import Sequence from pathlib import Path -from typing import List -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) from personal_mnemonic_medium.exporters.base import CardExporter -from personal_mnemonic_medium.note_factories.base import DocumentFactory +from personal_mnemonic_medium.note_factories.base import ( + DocumentFactory, +) from personal_mnemonic_medium.note_factories.note import Document -from personal_mnemonic_medium.prompt_extractors.base import PromptExtractor +from personal_mnemonic_medium.prompt_extractors.base import ( + PromptExtractor, +) from personal_mnemonic_medium.prompt_extractors.prompt import Prompt @@ -21,17 +26,18 @@ def __init__( self.prompt_extractors = prompt_extractors self.card_exporter = card_exporter - def run( - self, - input_path: Path, - ) -> List[AnkiCard]: - notes: List[Document] = [] + def run(self, input_path: Path) -> list[AnkiCard]: + notes: list[Document] = [] if input_path.is_dir(): - notes += list(self.document_factory.get_notes_from_dir(dir_path=input_path)) + notes += list( + self.document_factory.get_notes_from_dir( + dir_path=input_path + ) + ) if not input_path.is_dir(): note_from_file = self.document_factory.get_note_from_file( - file_path=input_path, + file_path=input_path ) notes.append(note_from_file) @@ -42,6 +48,6 @@ def run( collected_prompts += extractor.extract_prompts(note) cards: list[AnkiCard] = self.card_exporter.prompts_to_cards( - prompts=collected_prompts, + prompts=collected_prompts ) return cards diff --git a/src/personal_mnemonic_medium/exporters/anki/card_types/base.py b/src/personal_mnemonic_medium/exporters/anki/card_types/base.py index 109eb1d..5f78591 100644 --- a/src/personal_mnemonic_medium/exporters/anki/card_types/base.py +++ b/src/personal_mnemonic_medium/exporters/anki/card_types/base.py @@ -2,11 +2,11 @@ import os import re from abc import ABC, abstractmethod +from collections.abc import Callable, Iterator from pathlib import Path -from typing import Any, Callable, List, Optional, Tuple +from typing import Any import genanki -from personal_mnemonic_medium.exporters.anki.globals import CONFIG from personal_mnemonic_medium.exporters.markdown_to_html.html_compiler import ( compile_field, ) @@ -22,9 +22,11 @@ class AnkiCard(ABC): def __init__( self, 
- fields: List[str], + fields: list[str], source_prompt: Prompt, - url_generator: Callable[[Path, Optional[int]], str] = get_obsidian_url, + url_generator: Callable[ + [Path, int | None], str + ] = get_obsidian_url, html_compiler: Callable[[str], str] = compile_field, ): self.markdown_fields = fields @@ -38,11 +40,11 @@ def source_markdown(self) -> str: return self.source_doc.content @property - def html_fields(self) -> List[str]: + def html_fields(self) -> list[str]: return list(map(self.html_compiler, self.markdown_fields)) @property - def tags(self) -> List[str]: + def tags(self) -> list[str]: return self.source_doc.tags @property @@ -82,7 +84,7 @@ def deckname(self) -> str: + self.subdeck ) raise ValueError( - "Subdeck length is 0", + "Subdeck length is 0" ) # This is purposefully non-valid code except: # noqa return "0. Don't click me::1. Active::Personal Mnemonic Medium" @@ -98,21 +100,22 @@ def add_field(self, field: Any): def get_source_button(self) -> str: """Get the button to open the source document.""" url = self.url_generator( - self.source_doc.source_path, - self.source_prompt.line_nr, + self.source_doc.source_path, self.source_prompt.line_nr ) html = f'
<a href="{url}">Open</a>
' return html def to_genanki_note(self) -> genanki.Note: """Produce a genanki. Note with the specified guid.""" - if len(self.html_fields) > len(self.genanki_model.fields): + if len(self.html_fields) > len(self.genanki_model.fields): # type: ignore raise ValueError( - f"Too many fields for model {self.genanki_model.name}: {self.html_fields}", + f"Too many fields for model {self.genanki_model.name}: {self.html_fields}" # type: ignore ) - if len(self.html_fields) < len(self.genanki_model.fields): - while len(self.html_fields) < len(self.genanki_model.fields): + if len(self.html_fields) < len(self.genanki_model.fields): # type: ignore + while len(self.html_fields) < len( + self.genanki_model.fields # type: ignore + ): # type: ignore before_extras_field = len(self.html_fields) == 2 if before_extras_field: self.add_field(self.get_source_button()) @@ -132,9 +135,9 @@ def to_genanki_note(self) -> genanki.Note: tags=self.tags, ) - def make_ref_pair(self, filename: str) -> Tuple[Path, str]: + def make_ref_pair(self, filename: str) -> tuple[Path, str]: """Take a filename relative to the card, and make it absolute.""" - newname = "%".join(filename.split(os.sep)) + newname = "%".join(filename.split(os.sep)) # type: ignore # noqa: PTH206 if os.path.isabs(filename): # noqa abspath = Path(filename) @@ -146,25 +149,34 @@ def get_deck_dir(self) -> Path: # This is all it takes return Path(self.source_doc.source_path).parent - def determine_media_references(self): + def determine_media_references( + self + ) -> Iterator[tuple[Path, Path]]: """Find all media references in a card""" for i, field in enumerate(self.html_fields): current_stage = field for regex in [ - r'src="([^"]*?)"', + r'src="([^"]*?)"' ]: # TODO not sure how this should work:, r'\[sound:(.*?)\]']: results = [] - def process_match(m) -> str: # noqa - initial_contents = m.group(1) - abspath, newpath = self.make_ref_pair(initial_contents) - results.append((abspath, newpath)) # noqa + def process_match(m) -> str: # noqa # type: ignore + initial_contents = m.group(1) # type: ignore + abspath, newpath = self.make_ref_pair( + initial_contents # type: ignore + ) # type: ignore + results.append((abspath, newpath)) # noqa # type: ignore return r'src="' + newpath + '"' - current_stage = re.sub(regex, process_match, current_stage) + current_stage = re.sub( + regex, + process_match, + current_stage, # type: ignore + ) # type: ignore - for r in results: - yield r + yield from results # Anki seems to hate alt tags :( - self.html_fields[i] = re.sub(r'alt="[^"]*?"', "", current_stage) + self.html_fields[i] = re.sub( + r'alt="[^"]*?"', "", current_stage + ) diff --git a/src/personal_mnemonic_medium/exporters/anki/card_types/cloze.py b/src/personal_mnemonic_medium/exporters/anki/card_types/cloze.py index 3e5b854..626dd25 100644 --- a/src/personal_mnemonic_medium/exporters/anki/card_types/cloze.py +++ b/src/personal_mnemonic_medium/exporters/anki/card_types/cloze.py @@ -1,9 +1,11 @@ import re +from collections.abc import Callable from pathlib import Path -from typing import Callable, List, Optional import genanki -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) from personal_mnemonic_medium.exporters.anki.globals import CONFIG from personal_mnemonic_medium.exporters.markdown_to_html.html_compiler import ( compile_field, @@ -18,9 +20,11 @@ class AnkiCloze(AnkiCard): def __init__( self, - fields: List[str], + fields: list[str], source_prompt: Prompt, 
- url_generator: Callable[[Path, Optional[int]], str] = get_obsidian_url, + url_generator: Callable[ + [Path, int | None], str + ] = get_obsidian_url, html_compiler: Callable[[str], str] = compile_field, ): super().__init__( @@ -32,13 +36,12 @@ def __init__( @property def genanki_model(self) -> genanki.Model: - global CONFIG # noqa return genanki.Model( model_id=simple_hash(CONFIG["card_model_name_cloze"]), # type: ignore name=CONFIG["card_model_name_cloze"], fields=CONFIG["card_model_fields_cloze"], templates=CONFIG["card_model_template_cloze"], - css=CONFIG["card_model_css"], + css=CONFIG["card_model_css"], # type: ignore model_type=1, # This is the model_type number for genanki, takes 0 for QA or 1 for cloze ) diff --git a/src/personal_mnemonic_medium/exporters/anki/card_types/qa.py b/src/personal_mnemonic_medium/exporters/anki/card_types/qa.py index 98310ec..53ddf3a 100644 --- a/src/personal_mnemonic_medium/exporters/anki/card_types/qa.py +++ b/src/personal_mnemonic_medium/exporters/anki/card_types/qa.py @@ -1,8 +1,10 @@ +from collections.abc import Callable # noqa: I001 from pathlib import Path -from typing import Callable, List, Optional import genanki -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) from personal_mnemonic_medium.exporters.anki.globals import CONFIG from personal_mnemonic_medium.exporters.markdown_to_html.html_compiler import ( compile_field, @@ -12,14 +14,17 @@ ) from personal_mnemonic_medium.prompt_extractors.prompt import Prompt from personal_mnemonic_medium.utils.hasher import simple_hash +from personal_mnemonic_medium.exporters.anki.globals import CONFIG # noqa class AnkiQA(AnkiCard): def __init__( self, - fields: List[str], + fields: list[str], source_prompt: Prompt, - url_generator: Callable[[Path, Optional[int]], str] = get_obsidian_url, + url_generator: Callable[ + [Path, int | None], str + ] = get_obsidian_url, html_compiler: Callable[[str], str] = compile_field, ): super().__init__( diff --git a/src/personal_mnemonic_medium/exporters/anki/globals.py b/src/personal_mnemonic_medium/exporters/anki/globals.py index 6f32482..b0adbbd 100644 --- a/src/personal_mnemonic_medium/exporters/anki/globals.py +++ b/src/personal_mnemonic_medium/exporters/anki/globals.py @@ -1,15 +1,19 @@ # Anki 2.1 has mathjax built in, but ankidroid and other clients don't. import textwrap -from typing import Any, Dict +from typing import Any -from personal_mnemonic_medium.exporters.anki.anki_css import CARD_MODEL_CSS +from personal_mnemonic_medium.exporters.anki.anki_css import ( + CARD_MODEL_CSS, +) -ANKICONNECT_URL = "http://host.docker.internal:8765" # On host machine, port is 8765 +ANKICONNECT_URL = ( + "http://host.docker.internal:8765" +) # On host machine, port is 8765 CARD_MATHJAX_CONTENT = textwrap.dedent( """\ -""", +""" ) VERSION = "0.1" @@ -17,8 +21,12 @@ QUESTION_STR = r"{{ Question }}" ANSWER_STR = r"{{ Answer }}" EXTRA_STR = r"{{ Extra }}" -TTS_QUESTION_STR = r"{{ tts en_US voices=Apple_Samantha speed=1.05:Question }}" -TTS_ANSWER_STR = r"{{ tts en_US voices=Apple_Samantha speed=1.05:Answer }}" +TTS_QUESTION_STR = ( + r"{{ tts en_US voices=Apple_Samantha speed=1.05:Question }}" +) +TTS_ANSWER_STR = ( + r"{{ tts en_US voices=Apple_Samantha speed=1.05:Answer }}" +) QA_MODEL_TEMPLATE = [ { @@ -44,19 +52,19 @@ """, - }, + } ] CLOZE_MODEL_TEMPLATE = [ { "name": "Ankdown Cloze Card with UUID", "qfmt": r"{{{{cloze:Text}}}}\n
{{{{Extra}}}}
\n{}".format( - CARD_MATHJAX_CONTENT, + CARD_MATHJAX_CONTENT ), "afmt": r"{{{{cloze:Text}}}}\n
{{{{Extra}}}}
\n{}".format( - CARD_MATHJAX_CONTENT, + CARD_MATHJAX_CONTENT ), - }, + } ] CONFIG = { @@ -84,7 +92,7 @@ "card_model_template_cloze": CLOZE_MODEL_TEMPLATE, } -VERSION_LOG: Dict[Any, Any] = {} +VERSION_LOG: dict[Any, Any] = {} Q_TYPE_TAG = { "G": "med/type/1_GP", "A": "med/type/2_Acute_care", diff --git a/src/personal_mnemonic_medium/exporters/anki/package_generator.py b/src/personal_mnemonic_medium/exporters/anki/package_generator.py index 6fa3191..aca35ba 100644 --- a/src/personal_mnemonic_medium/exporters/anki/package_generator.py +++ b/src/personal_mnemonic_medium/exporters/anki/package_generator.py @@ -8,17 +8,26 @@ from dataclasses import dataclass from pathlib import Path from shutil import copyfile -from typing import Any, List, Set import genanki -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard -from personal_mnemonic_medium.exporters.anki.card_types.cloze import AnkiCloze -from personal_mnemonic_medium.exporters.anki.card_types.qa import AnkiQA +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) +from personal_mnemonic_medium.exporters.anki.card_types.cloze import ( + AnkiCloze, +) +from personal_mnemonic_medium.exporters.anki.card_types.qa import ( + AnkiQA, +) from personal_mnemonic_medium.exporters.base import CardExporter -from personal_mnemonic_medium.prompt_extractors.cloze_extractor import ClozePrompt +from personal_mnemonic_medium.prompt_extractors.cloze_extractor import ( + ClozePrompt, +) from personal_mnemonic_medium.prompt_extractors.prompt import Prompt -from personal_mnemonic_medium.prompt_extractors.qa_extractor import QAPrompt +from personal_mnemonic_medium.prompt_extractors.qa_extractor import ( + QAPrompt, +) from personal_mnemonic_medium.utils.hasher import simple_hash log = logging.getLogger(__name__) @@ -30,23 +39,15 @@ ) -class DeckCollection(dict): - """Defaultdict for decks, but with stored name.""" - - def __getitem__(self, deckname: str) -> Any: - if deckname not in self: - deck_id = simple_hash(deckname) - self[deckname] = genanki.Deck(deck_id, deckname) - return super().__getitem__(deckname) - - @dataclass(frozen=True) class DeckBundle: deck: genanki.Deck - media: Set[str] + media: set[str] def get_package(self) -> genanki.Package: - return genanki.Package(deck_or_decks=self.deck, media_files=list(self.media)) + return genanki.Package( + deck_or_decks=self.deck, media_files=list(self.media) + ) def save_deck_to_file(self, output_path: Path) -> Path: package = self.get_package() @@ -61,22 +62,19 @@ def __init__(self) -> None: pass @staticmethod - def cards_to_deck_bundle(cards: List[AnkiCard]) -> DeckBundle: + def cards_to_deck_bundle(cards: list[AnkiCard]) -> DeckBundle: """Take an iterable prompts, output an .apkg in a file called output_name. NOTE: We _must_ be in a temp directory. """ deck, media = AnkiPackageGenerator.cards_to_deck(cards=cards) - return DeckBundle( - deck=deck, - media=media, - ) + return DeckBundle(deck=deck, media=media) @staticmethod def cards_to_deck( - cards: Sequence[AnkiCard], - ) -> tuple[genanki.Deck, Set[str]]: - media = set() + cards: Sequence[AnkiCard] + ) -> tuple[genanki.Deck, set[str]]: + media = set() # type: ignore deck_name = cards[0].deckname deck_id = simple_hash(deck_name) @@ -86,24 +84,26 @@ def cards_to_deck( for abspath, newpath in card.determine_media_references(): try: copyfile( - abspath, - newpath, + abspath, newpath ) # This is inefficient but definitely works on all platforms. 
- media.add(newpath) + media.add(newpath) # type: ignore except FileNotFoundError as e: - log.debug(f"Could not find file {abspath} for media, {e}.") + log.debug( + f"Could not find file {abspath} for media, {e}." + ) try: deck.add_note(card.to_genanki_note()) except IndexError as e: - log.debug(f"Could not add card {card} to deck {deck_name}, {e}.") + log.debug( + f"Could not add card {card} to deck {deck_name}, {e}." + ) - return deck, media + return deck, media # type: ignore def prompts_to_cards( - self, - prompts: Sequence[Prompt], - ) -> List[AnkiCard]: + self, prompts: Sequence[Prompt] + ) -> list[AnkiCard]: """Takes an iterable of prompts and turns them into AnkiCards""" cards: list[AnkiCard] = [] @@ -111,19 +111,17 @@ def prompts_to_cards( for prompt in prompts: if isinstance(prompt, QAPrompt): card = AnkiQA( - fields=[ - prompt.question, - prompt.answer, - ], + fields=[prompt.question, prompt.answer], source_prompt=prompt, ) elif isinstance(prompt, ClozePrompt): card = AnkiCloze( - fields=[prompt.content], - source_prompt=prompt, + fields=[prompt.content], source_prompt=prompt ) else: - raise NotImplementedError(f"Prompt type {type(prompt)} not supported.") + raise NotImplementedError( + f"Prompt type {type(prompt)} not supported." + ) cards += [card] diff --git a/src/personal_mnemonic_medium/exporters/anki/sync.py b/src/personal_mnemonic_medium/exporters/anki/sync.py index 33cbc21..1e0264c 100644 --- a/src/personal_mnemonic_medium/exporters/anki/sync.py +++ b/src/personal_mnemonic_medium/exporters/anki/sync.py @@ -3,19 +3,23 @@ import urllib.request from pathlib import Path from time import sleep -from typing import Any, Dict, List +from typing import Any from genanki import Model, Note from wasabi import Printer -from personal_mnemonic_medium.exporters.anki.globals import ANKICONNECT_URL -from personal_mnemonic_medium.exporters.anki.package_generator import DeckBundle +from personal_mnemonic_medium.exporters.anki.globals import ( + ANKICONNECT_URL, +) +from personal_mnemonic_medium.exporters.anki.package_generator import ( + DeckBundle, +) msg = Printer(timestamp=True) # helper for creating anki connect requests -def request(action: Any, **params: Any) -> Dict[str, Any]: +def request(action: Any, **params: Any) -> dict[str, Any]: return {"action": action, "params": params, "version": 6} @@ -29,9 +33,13 @@ def invoke(action: Any, **params: Any) -> Any: Returns: Any: the response from anki connect """ - requestJson = json.dumps(request(action, **params)).encode("utf-8") + requestJson = json.dumps(request(action, **params)).encode( + "utf-8" + ) response = json.load( - urllib.request.urlopen(urllib.request.Request(ANKICONNECT_URL, requestJson)), + urllib.request.urlopen( + urllib.request.Request(ANKICONNECT_URL, requestJson) + ) ) if len(response) != 2: raise Exception("response has an unexpected number of fields") @@ -52,7 +60,7 @@ def anki_connect_is_live() -> bool: except Exception as err: msg.info(f"Attempted connection on {ANKICONNECT_URL}") msg.info( - "Unable to reach anki connect. Make sure anki is running and the Anki Connect addon is installed.", + "Unable to reach anki connect. Make sure anki is running and the Anki Connect addon is installed." 
) msg.fail(f"Error was {err}") @@ -82,83 +90,113 @@ def sync_deck( return # get a list of anki cards in the deck - anki_note_info_by_guid, anki_note_guids = get_anki_note_infos(deck_bundle) + anki_note_info_by_guid, anki_note_guids = get_anki_note_infos( + deck_bundle + ) # get the unique guids of the md notes md_note_guids = get_md_note_infos(deck_bundle) note_diff = md_note_guids.symmetric_difference(anki_note_guids) if note_diff: - msg.info(" Syncing deck: ") - msg.info(f"\t{deck_bundle.deck.name}") - - added_note_guids = md_note_guids - anki_note_guids - if added_note_guids: - msg.info("\tNotes added: ") - msg.info(f"\t\t{added_note_guids}") - - removed_note_guids = anki_note_guids - md_note_guids - if removed_note_guids: - msg.info("\tNotes removed: ") - msg.info(f"\t\t{removed_note_guids}") - - package_path = deck_bundle.save_deck_to_file(save_dir_path / "deck.apkg") - try: - sync_path = str(sync_dir_path / "deck.apkg") - invoke("importPackage", path=sync_path) - print(f"Imported {deck_bundle.deck.name}!") - - if delete_cards: - try: - guids_to_delete = anki_note_guids - md_note_guids - if guids_to_delete: - note_ids = [ - anki_note_info_by_guid[guid]["noteId"] - for guid in guids_to_delete - ] - - invoke( - "deleteNotes", - notes=note_ids, - ) - msg.good(f"Deleted {len(guids_to_delete)} notes") - - except Exception: - msg.fail(f"Unable to delete cards in {deck_bundle.deck.name}") - # Print full stack trace - traceback.print_exc() - except Exception as e: - print(f"Unable to sync {package_path} to anki") - print(f"{e}") - traceback.print_exc() + _sync_deck( + deck_bundle=deck_bundle, + save_dir_path=save_dir_path, + sync_dir_path=sync_dir_path, + delete_cards=delete_cards, + anki_note_info_by_guid=anki_note_info_by_guid, + anki_note_guids=anki_note_guids, + md_note_guids=md_note_guids, + ) else: msg.info("Skipped") - msg.info(f"{deck_bundle.deck.name}") + msg.info(f"{deck_bundle.deck.name}") # type: ignore msg.info("\tNo notes added or removed") print("\n") +def _sync_deck( + deck_bundle: DeckBundle, + save_dir_path: Path, + sync_dir_path: Path, + delete_cards: bool, + anki_note_info_by_guid: dict[str, Any], + anki_note_guids: set[str], + md_note_guids: set[str], +): + msg.info(" Syncing deck: ") + msg.info(f"\t{deck_bundle.deck.name}") # type: ignore + + added_note_guids = md_note_guids - anki_note_guids + if added_note_guids: + msg.info("\tNotes added: ") + msg.info(f"\t\t{added_note_guids}") + + removed_note_guids = anki_note_guids - md_note_guids + if removed_note_guids: + msg.info("\tNotes removed: ") + msg.info(f"\t\t{removed_note_guids}") + + package_path = deck_bundle.save_deck_to_file( + save_dir_path / "deck.apkg" + ) + try: + sync_path = str(sync_dir_path / "deck.apkg") + invoke("importPackage", path=sync_path) + print(f"Imported {deck_bundle.deck.name}!") # type: ignore + + if delete_cards: + try: + guids_to_delete = anki_note_guids - md_note_guids + if guids_to_delete: + note_ids = [ + anki_note_info_by_guid[guid]["noteId"] + for guid in guids_to_delete + ] + + invoke("deleteNotes", notes=note_ids) + msg.good(f"Deleted {len(guids_to_delete)} notes") + + except Exception: + msg.fail( + f"Unable to delete cards in {deck_bundle.deck.name}" # type: ignore + ) + # Print full stack trace + traceback.print_exc() + except Exception as e: + print(f"Unable to sync {package_path} to anki") + print(f"{e}") + traceback.print_exc() + + def get_md_note_infos(deck_bundle: DeckBundle) -> set[str]: - md_notes: List[Note] = deck_bundle.deck.notes + md_notes: list[Note] = 
deck_bundle.deck.notes # type: ignore md_note_guids = {str(n.guid) for n in md_notes} return md_note_guids -def get_anki_note_infos(deck_bundle: DeckBundle) -> tuple[dict[str, Any], set[str]]: - anki_card_ids: List[int] = invoke( +def get_anki_note_infos( + deck_bundle: DeckBundle +) -> tuple[dict[str, Any], set[str]]: + anki_card_ids: list[int] = invoke( "findCards", - query=f'"deck:{deck_bundle.deck.name}"', + query=f'"deck:{deck_bundle.deck.name}"', # type: ignore ) # get a list of anki notes in the deck - anki_note_ids: List[int] = invoke("cardsToNotes", cards=anki_card_ids) + anki_note_ids: list[int] = invoke( + "cardsToNotes", cards=anki_card_ids + ) # get the note info for the notes in the deck anki_notes_info = invoke("notesInfo", notes=anki_note_ids) # convert the note info into a dictionary of guid to note info anki_note_info_by_guid = { - n["fields"]["UUID"]["value"].replace("
<p>
", "").replace("
</p>
", "").strip(): n + n["fields"]["UUID"]["value"] + .replace("
<p>
", "") + .replace("
</p>
", "") + .strip(): n for n in anki_notes_info } @@ -172,10 +210,12 @@ def sync_model(model: Model): model_names_to_ids = {} try: model_names_to_ids = invoke("modelNamesAndIds") - if model.name not in model_names_to_ids: + if model.name not in model_names_to_ids: # type: ignore return except Exception as e: - msg.good("\tUnable to fetch existing model names and ids from anki") + msg.good( + "\tUnable to fetch existing model names and ids from anki" + ) msg.good(f"\t\t{e}") if anki_connect_is_live(): @@ -183,30 +223,29 @@ def sync_model(model: Model): invoke( "updateModelTemplates", model={ - "name": model.name, + "name": model.name, # type: ignore "templates": { t["name"]: { "qfmt": t["qfmt"], "afmt": t["afmt"], } - for t in model.templates + for t in model.templates # type: ignore }, }, ) - msg.good(f"\tUpdated model {model.name} template") + msg.good(f"\tUpdated model {model.name} template") # type: ignore except Exception as e: - msg.good(f"\tUnable to update model {model.name} template") + msg.good( + f"\tUnable to update model {model.name} template" # type: ignore + ) msg.good(f"\t\t{e}") try: invoke( "updateModelStyling", - model={ - "name": model.name, - "css": model.css, - }, + model={"name": model.name, "css": model.css}, # type: ignore ) - msg.good(f"\tUpdated model {model.name} css") + msg.good(f"\tUpdated model {model.name} css") # type: ignore except Exception as e: - msg.good(f"\tUnable to update model {model.name} css") + msg.good(f"\tUnable to update model {model.name} css") # type: ignore msg.good(f"\t\t{e}") diff --git a/src/personal_mnemonic_medium/exporters/base.py b/src/personal_mnemonic_medium/exporters/base.py index f489121..3e13b75 100644 --- a/src/personal_mnemonic_medium/exporters/base.py +++ b/src/personal_mnemonic_medium/exporters/base.py @@ -1,11 +1,15 @@ from abc import ABC, abstractmethod from collections.abc import Sequence -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) from personal_mnemonic_medium.prompt_extractors.prompt import Prompt class CardExporter(ABC): @abstractmethod - def prompts_to_cards(self, prompts: Sequence[Prompt]) -> list[AnkiCard]: + def prompts_to_cards( + self, prompts: Sequence[Prompt] + ) -> list[AnkiCard]: pass diff --git a/src/personal_mnemonic_medium/exporters/markdown_to_html/html_compiler.py b/src/personal_mnemonic_medium/exporters/markdown_to_html/html_compiler.py index d2732b5..9ffe301 100644 --- a/src/personal_mnemonic_medium/exporters/markdown_to_html/html_compiler.py +++ b/src/personal_mnemonic_medium/exporters/markdown_to_html/html_compiler.py @@ -12,7 +12,10 @@ def field_to_html(field: Any) -> str: If math is separated with dollar sign it is converted to brackets. """ if CONFIG["dollar"]: - for sep, (op, cl) in [("$$", (r"\\[", r"\\]")), ("$", (r"\\(", r"\\)"))]: + for sep, (op, cl) in [ + ("$$", (r"\\[", r"\\]")), + ("$", (r"\\(", r"\\)")), + ]: escaped_sep = sep.replace(r"$", r"\$") # ignore escaped dollar signs when splitting the field field = re.split(rf"(? 
str: token_instances = re.findall(pattern, field) for instance in token_instances: - field = field.replace(instance, replacement + instance[1:-1] + replacement) # type: ignore + field = field.replace( + instance, + replacement + instance[1:-1] + replacement, # type: ignore + ) # type: ignore # Make sure every \n converts into a newline field = field.replace("\n", " \n") @@ -45,7 +51,11 @@ def field_to_html(field: Any) -> str: def compile_field(fieldtext: str) -> str: """Turn source markdown into an HTML field suitable for Anki.""" - fieldtext_sans_wiki = fieldtext.replace("[[", "").replace("]]", "") - fieldtext_sans_comments = re.sub(r"", "", fieldtext_sans_wiki) + fieldtext_sans_wiki = fieldtext.replace("[[", "").replace( + "]]", "" + ) + fieldtext_sans_comments = re.sub( + r"", "", fieldtext_sans_wiki + ) return field_to_html(fieldtext_sans_comments) diff --git a/src/personal_mnemonic_medium/exporters/url_generators/obsidian_url.py b/src/personal_mnemonic_medium/exporters/url_generators/obsidian_url.py index 43f2f92..75d4d03 100644 --- a/src/personal_mnemonic_medium/exporters/url_generators/obsidian_url.py +++ b/src/personal_mnemonic_medium/exporters/url_generators/obsidian_url.py @@ -1,9 +1,10 @@ import urllib from pathlib import Path -from typing import Optional -def get_obsidian_url(source_path: Path, line_nr: Optional[int] = None) -> str: +def get_obsidian_url( + source_path: Path, line_nr: int | None = None +) -> str: """Get the obsidian URI for the source document.""" vault: str = urllib.parse.quote(source_path.parent.name) # type: ignore file: str = urllib.parse.quote(source_path.name) # type: ignore diff --git a/src/personal_mnemonic_medium/note_factories/base.py b/src/personal_mnemonic_medium/note_factories/base.py index cd64ce8..75f91c7 100644 --- a/src/personal_mnemonic_medium/note_factories/base.py +++ b/src/personal_mnemonic_medium/note_factories/base.py @@ -7,7 +7,9 @@ class DocumentFactory(ABC): @abstractmethod - def get_notes_from_dir(self, dir_path: Path) -> Sequence[Document]: + def get_notes_from_dir( + self, dir_path: Path + ) -> Sequence[Document]: pass @abstractmethod diff --git a/src/personal_mnemonic_medium/note_factories/markdown.py b/src/personal_mnemonic_medium/note_factories/markdown.py index 8b54861..f6ba2f8 100644 --- a/src/personal_mnemonic_medium/note_factories/markdown.py +++ b/src/personal_mnemonic_medium/note_factories/markdown.py @@ -2,11 +2,12 @@ import re from collections.abc import Sequence from pathlib import Path -from typing import Optional from tqdm import tqdm -from personal_mnemonic_medium.note_factories.base import DocumentFactory +from personal_mnemonic_medium.note_factories.base import ( + DocumentFactory, +) from personal_mnemonic_medium.note_factories.note import Document @@ -31,7 +32,7 @@ def get_and_append_new_uuid(self, file_path: Path) -> str: def get_note_id(self, file_string: str) -> str: return re.findall(r"" - expected_id = ( - r"" - ) + expected_id = r"" extracted_id = factory.get_note_id(note_str) @@ -133,21 +136,29 @@ def test_get_bear_id(): def test_alias_wiki_link_substitution(): alias = "Here I am [[alias|wiki link]], and another [[alias2|wiki link2]]" - output = Document._replace_alias_wiki_links(alias) - assert output == "Here I am [[wiki link]], and another [[wiki link2]]" + output = Document.replace_alias_wiki_links(alias) + assert ( + output + == "Here I am [[wiki link]], and another [[wiki link2]]" + ) no_alias = "Here I am [[wiki link]] and another [[wiki link2]]" - output = 
Document._replace_alias_wiki_links(no_alias) - assert output == "Here I am [[wiki link]] and another [[wiki link2]]" + output = Document.replace_alias_wiki_links(no_alias) + assert ( + output == "Here I am [[wiki link]] and another [[wiki link2]]" + ) test_3 = "How was ice climbing [[Franz Josef]] with [[Vibeke Christiansen|Vibeke]]?" - output = Document._replace_alias_wiki_links(test_3) - assert output == "How was ice climbing [[Franz Josef]] with [[Vibeke]]?" + output = Document.replace_alias_wiki_links(test_3) + assert ( + output + == "How was ice climbing [[Franz Josef]] with [[Vibeke]]?" + ) alias = "[[Isolation (database design)|Isolation]]" - output = Document._replace_alias_wiki_links(alias) + output = Document.replace_alias_wiki_links(alias) assert output == "[[Isolation]]" alias = "[[test-test|test-]]" - output = Document._replace_alias_wiki_links(alias) + output = Document.replace_alias_wiki_links(alias) assert output == "[[test-]]" diff --git a/tests/exporters/anki/test_package_generator.py b/tests/exporters/anki/test_package_generator.py index 73c4055..ba41cfd 100644 --- a/tests/exporters/anki/test_package_generator.py +++ b/tests/exporters/anki/test_package_generator.py @@ -1,13 +1,18 @@ from pathlib import Path -import genanki -from personal_mnemonic_medium.exporters.anki.card_types.base import AnkiCard -from personal_mnemonic_medium.exporters.anki.card_types.qa import AnkiQA +from personal_mnemonic_medium.exporters.anki.card_types.base import ( + AnkiCard, +) +from personal_mnemonic_medium.exporters.anki.card_types.qa import ( + AnkiQA, +) from personal_mnemonic_medium.exporters.anki.package_generator import ( AnkiPackageGenerator, ) from personal_mnemonic_medium.note_factories.note import Document -from personal_mnemonic_medium.prompt_extractors.qa_extractor import QAPrompt +from personal_mnemonic_medium.prompt_extractors.qa_extractor import ( + QAPrompt, +) def test_cards_to_decks(): @@ -31,10 +36,7 @@ def test_cards_to_decks(): for _ in range(4) ] - deck, media = AnkiPackageGenerator().cards_to_deck(cards=genanki_notes) - - assert type(deck) == genanki.Deck - assert type(media) == set + AnkiPackageGenerator().cards_to_deck(cards=genanki_notes) def test_package_generators(): @@ -58,6 +60,4 @@ def test_package_generators(): for _ in range(4) ] - AnkiPackageGenerator().cards_to_deck_bundle( - cards=genanki_notes, - ) + AnkiPackageGenerator().cards_to_deck_bundle(cards=genanki_notes) diff --git a/tests/note_factories/test_markdown_extractor.py b/tests/note_factories/test_markdown_extractor.py index ddd8636..df8a045 100644 --- a/tests/note_factories/test_markdown_extractor.py +++ b/tests/note_factories/test_markdown_extractor.py @@ -2,15 +2,27 @@ PROJECT_ROOT = Path(__file__).parent.parent.parent -from personal_mnemonic_medium.note_factories.markdown import MarkdownNoteFactory +from personal_mnemonic_medium.note_factories.markdown import ( + MarkdownNoteFactory, +) def test_get_notes_from_dir(): notes = MarkdownNoteFactory().get_notes_from_dir( - PROJECT_ROOT / "tests" / "test_md_files", + PROJECT_ROOT / "tests" / "test_md_files" ) assert len(notes) == 4 - assert len([note for note in notes if note.title == "test_card_guid"]) == 1 - assert len([note for note in notes if "7696CDCD" in note.content]) == 1 - assert len([note for note in notes if "7696CDCD" in note.uuid]) == 1 + assert ( + len( + [note for note in notes if note.title == "test_card_guid"] + ) + == 1 + ) + assert ( + len([note for note in notes if "7696CDCD" in note.content]) + == 1 + ) + assert ( + len([note for 
note in notes if "7696CDCD" in note.uuid]) == 1 + ) diff --git a/tests/prompt_extractors/test_cloze_extractor.py b/tests/prompt_extractors/test_cloze_extractor.py index 1d5f567..8267a4b 100644 --- a/tests/prompt_extractors/test_cloze_extractor.py +++ b/tests/prompt_extractors/test_cloze_extractor.py @@ -32,6 +32,8 @@ def test_cloze_no_hits(): source_path=Path(__file__), ) - prompts = ClozePromptExtractor().extract_prompts(note_without_cloze) + prompts = ClozePromptExtractor().extract_prompts( + note_without_cloze + ) assert len(prompts) == 0 diff --git a/tests/prompt_extractors/test_qa_prompt_extractor.py b/tests/prompt_extractors/test_qa_prompt_extractor.py index 233ecd4..d3631c9 100644 --- a/tests/prompt_extractors/test_qa_prompt_extractor.py +++ b/tests/prompt_extractors/test_qa_prompt_extractor.py @@ -2,7 +2,9 @@ import pytest from personal_mnemonic_medium.note_factories.note import Document -from personal_mnemonic_medium.prompt_extractors.qa_extractor import QAPromptExtractor +from personal_mnemonic_medium.prompt_extractors.qa_extractor import ( + QAPromptExtractor, +) @pytest.fixture() @@ -35,18 +37,26 @@ def test_has_qa_matches(qa_extractor: QAPromptExtractor): "QA. Testing something else, even with QA in it!", "\\Q. Testing newlines as well!", ] - matches = [string for string in example_strings if qa_extractor._has_qa(string)] + matches = [ + string + for string in example_strings + if qa_extractor._has_qa(string) # type: ignore + ] assert len(matches) == 3 def test_has_qa_does_not_match(qa_extractor: QAPromptExtractor): - example_strings = ["\nQ.E.D.", "> A question like this, or", "::Q. A comment!::"] + example_strings = [ + "\nQ.E.D.", + "> A question like this, or", + "::Q. A comment!::", + ] matches = 0 for string in example_strings: - if qa_extractor._has_qa(string): + if qa_extractor._has_qa(string): # type: ignore matches += 1 assert matches == 0
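
The sync module above talks to Anki through AnkiConnect's JSON-over-HTTP protocol. A self-contained sketch of how the request/invoke helpers from src/personal_mnemonic_medium/exporters/anki/sync.py are used, assuming Anki is running with the AnkiConnect add-on reachable at the Docker-host URL from globals.py:

import json
import urllib.request
from typing import Any

ANKICONNECT_URL = "http://host.docker.internal:8765"  # from exporters/anki/globals.py


def request(action: Any, **params: Any) -> dict[str, Any]:
    # Same envelope as sync.py: AnkiConnect expects action, params and API version 6.
    return {"action": action, "params": params, "version": 6}


def invoke(action: Any, **params: Any) -> Any:
    # POST the request and unwrap AnkiConnect's {"result": ..., "error": ...} response.
    payload = json.dumps(request(action, **params)).encode("utf-8")
    response = json.load(
        urllib.request.urlopen(
            urllib.request.Request(ANKICONNECT_URL, payload)
        )
    )
    if response.get("error") is not None:
        raise RuntimeError(response["error"])
    return response["result"]


# e.g. list existing decks before importing a generated .apkg:
print(invoke("deckNames"))
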