
Commit

move to src folder
watermarkhu committed Feb 19, 2024
1 parent ded0a14 commit a3a53a4
Showing 18 changed files with 40 additions and 47 deletions.
6 changes: 1 addition & 5 deletions .pre-commit-config.yaml
@@ -1,14 +1,10 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.2.1
rev: v0.2.2
hooks:
# Run the linter.
- id: ruff
args: [--fix]
# Run the formatter.
- id: ruff-format
- repo: https://github.com/pre-commit/mirrors-mypy
rev: 'v1.8.0' # Use the sha / tag you want to point at
hooks:
- id: mypy
6 changes: 3 additions & 3 deletions pyproject.toml
@@ -8,7 +8,7 @@ license = "MIT"
readme = "README.md"
repository = "https://github.com/watermarkhu/textmate-grammar-python"
keywords = ["textmate", "tokenization"]
packages = [{include = "textmate_grammar"}]
packages = [{include = "textmate_grammar", from = "src"}]

[tool.poetry.dependencies]
python = "^3.11"
@@ -30,8 +30,8 @@ types-pyyaml = "^6.0.12.12"
##################################### ruff #####################################
ruff = "^0.2.1"
[tool.ruff]
include = ["pyproject.toml", "textmate_grammar/**/*.py"]
exclude = ["textmate_grammar/grammars/"]
include = ["pyproject.toml", "src/textmate_grammar/**/*.py"]
exclude = ["src/textmate_grammar/grammars/"]
line-length = 100
indent-width = 4

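For orientation (not part of the commit): with from = "src", Poetry packages src/textmate_grammar while the import name stays the same. A minimal sanity check, assuming the project is installed (e.g. via poetry install):

    import textmate_grammar

    # In an editable install after this commit, the package should resolve from the src/ tree.
    print(textmate_grammar.__path__[0])  # e.g. .../textmate-grammar-python/src/textmate_grammar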
File renamed without changes.
14 changes: 11 additions & 3 deletions textmate_grammar/elements.py → src/textmate_grammar/elements.py
@@ -176,6 +176,7 @@ def find(
stop_tokens: str | list[str] = "",
verbosity: int = -1,
stack: list[str] | None = None,
attribute: str = "_subelements",
) -> Generator[tuple[ContentElement, list[str]], None, None]:
"""Find the next subelement that match the input token(s).
@@ -195,7 +196,8 @@ def find(

if verbosity:
verbosity -= 1
for child in self._subelements:
children: list[ContentElement] = getattr(self, attribute, self._subelements)
for child in children:
if stop_tokens and (
child.token in stop_tokens
or (stop_tokens == ["*"] and child.token not in tokens)
@@ -212,10 +214,16 @@
return None

def findall(
self, tokens: str | list[str], stop_tokens: str | list[str] = "", verbosity: int = -1
self,
tokens: str | list[str],
stop_tokens: str | list[str] = "",
verbosity: int = -1,
attribute: str = "_subelements",
) -> list[tuple[ContentElement, list[str]]]:
"""Returns subelements that match the input token(s)."""
return list(self.find(tokens, stop_tokens=stop_tokens, verbosity=verbosity))
return list(
self.find(tokens, stop_tokens=stop_tokens, verbosity=verbosity, attribute=attribute)
)

def flatten(self) -> list[tuple[tuple[int, int], str, list[str]]]:
"""Converts the object to a flattened array of tokens per index."""
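To illustrate the new attribute keyword added to find and findall above, a hedged usage sketch follows; the element variable, the token scope, and the "children" attribute name are illustrative assumptions, not taken from this commit. With the default, both methods still walk _subelements; passing another attribute name makes them walk that collection instead, falling back to _subelements when it is absent.

    # Hypothetical usage; `element` is assumed to be a ContentElement produced by a
    # LanguageParser, and the scope name and "children" attribute are illustrative only.
    for child, stack in element.find("comment.line.percentage.matlab"):
        print(child.token, stack)                       # default: iterates element._subelements

    matches = element.findall(
        "comment.line.percentage.matlab",
        attribute="children",                           # new: walk a different attribute if defined
    )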
File renamed without changes.
File renamed without changes.
@@ -1,21 +1,18 @@
from pathlib import Path
import shutil
import yaml
from pathlib import Path

import yaml

tmLanguageFile = (
Path(__file__).parents[3]
/ "syntaxes"
/ "markdown"
/ "markdown.tmLanguage.base.yaml"
Path(__file__).parents[3] / "syntaxes" / "markdown" / "markdown.tmLanguage.base.yaml"
)
tmLanguageYAML = Path(__file__).parent / "grammar.yaml"


if tmLanguageFile.exists():
shutil.copyfile(tmLanguageFile, tmLanguageYAML)

with open(tmLanguageYAML, "r") as file:
with open(tmLanguageYAML) as file:
try:
GRAMMAR = yaml.load(file.read(), Loader=yaml.CLoader)
except ImportError:
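The try/except around the YAML load is truncated here; a hedged sketch of the fallback it implies (the except body is an assumption, only the CLoader attempt and the except ImportError line are visible above):

    import yaml

    with open(tmLanguageYAML) as file:
        content = file.read()
    try:
        GRAMMAR = yaml.load(content, Loader=yaml.CLoader)  # fast libyaml-backed loader
    except ImportError:
        GRAMMAR = yaml.load(content, Loader=yaml.Loader)   # assumed pure-Python fallback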
@@ -1,7 +1,7 @@
from pathlib import Path
import plistlib
import yaml
from pathlib import Path

import yaml

tmLanguageFile = (
Path(__file__).parents[3]
@@ -20,7 +20,7 @@
with open(tmLanguageYAML, "w") as f:
f.write(yaml.dump(GRAMMAR, indent=2))
else:
with open(tmLanguageYAML, "r") as file:
with open(tmLanguageYAML) as file:
try:
GRAMMAR = yaml.load(file.read(), Loader=yaml.CLoader)
except ImportError:
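For context, a hedged sketch of the plist-to-YAML caching step this module appears to perform when the upstream .tmLanguage file is present; the branch is partly cut off in the diff, so the plistlib read is an assumption based on the visible imports and the yaml.dump call:

    import plistlib
    import yaml

    if tmLanguageFile.exists():
        with open(tmLanguageFile, "rb") as f:
            GRAMMAR = plistlib.load(f)               # assumed: read the TextMate plist grammar
        with open(tmLanguageYAML, "w") as f:
            f.write(yaml.dump(GRAMMAR, indent=2))    # cache it as YAML next to this module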
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
28 changes: 11 additions & 17 deletions test/regression/__init__.py
@@ -1,45 +1,39 @@
import re
import os
import logging
import warnings
import os
import platform
import re
import subprocess
from pathlib import Path
import warnings
from abc import ABC, abstractmethod
from itertools import groupby
from pathlib import Path


from textmate_grammar.language import LanguageParser
import textmate_grammar

from textmate_grammar.language import LanguageParser

MODULE_ROOT = Path(textmate_grammar.__path__[0])
INDEX = MODULE_ROOT.parent / "test" / "regression" / "node_root" / "index.js"
INDEX = MODULE_ROOT.parents[1] / "test" / "regression" / "node_root" / "index.js"

logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger("textmate_grammar").setLevel(logging.INFO)

if platform.system() != "Linux":
warnings.warn(f"Regression tests on {os.name} is not supported")
warnings.warn(f"Regression tests on {os.name} is not supported", stacklevel=1)

elif (
"CI" not in os.environ or not os.environ["CI"] or "GITHUB_RUN_ID" not in os.environ
):
elif "CI" not in os.environ or not os.environ["CI"] or "GITHUB_RUN_ID" not in os.environ:
nvm_dir = Path(os.environ["HOME"]) / ".nvm"
nvm_script = nvm_dir / "nvm.sh"
env = os.environ.copy()
env["NVM_DIR"] = str(nvm_dir)

if not nvm_script.exists():
raise EnvironmentError(
raise OSError(
'Node environment not setup. Please run "bash install.sh" in the test/regression directory. '
)

pipe = subprocess.Popen(
f". {nvm_script}; env", stdout=subprocess.PIPE, shell=True, env=env
)
pipe = subprocess.Popen(f". {nvm_script}; env", stdout=subprocess.PIPE, shell=True, env=env)
output = pipe.communicate()[0]
NODE_ENV = dict((line.split("=", 1) for line in output.decode().splitlines()))
NODE_ENV = dict(line.split("=", 1) for line in output.decode().splitlines())

else:
NODE_ENV = os.environ.copy()
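The captured NODE_ENV and the INDEX script are presumably used further down this module to run the reference Node.js tokenizer; a hedged sketch of such a call (the arguments passed to index.js are assumptions, since that part of the file is not shown):

    import subprocess

    result = subprocess.run(
        ["node", str(INDEX), "path/to/source.m"],   # assumed CLI of the reference tokenizer
        env=NODE_ENV,                               # environment with the nvm-provided node on PATH
        capture_output=True,
        text=True,
        check=True,
    )
    reference_output = result.stdout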
16 changes: 7 additions & 9 deletions test/regression/test_matlab.py
@@ -1,18 +1,19 @@
from pathlib import Path
import logging
import pytest
from pathlib import Path

from textmate_grammar.language import LanguageParser
import pytest
from textmate_grammar.grammars import matlab
from . import RegressionTestClass, MODULE_ROOT
from textmate_grammar.language import LanguageParser

from . import MODULE_ROOT, RegressionTestClass

logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger("textmate_grammar").setLevel(logging.INFO)
parser = LanguageParser(matlab.GRAMMAR)

test_files = (
[
str(MODULE_ROOT.parent / "syntaxes" / "matlab" / (file + ".m"))
str(MODULE_ROOT.parents[1] / "syntaxes" / "matlab" / (file + ".m"))
for file in [
"Account",
"AnEnum",
@@ -23,10 +24,7 @@
"PropertyValidation",
]
]
+ [
str(test)
for test in (MODULE_ROOT.parent / "syntaxes" / "matlab" / "test").glob("*.m")
]
+ [str(test) for test in (MODULE_ROOT.parents[1] / "syntaxes" / "matlab" / "test").glob("*.m")]
+ [
str(Path(__file__).parent.resolve() / "matlab" / (file + ".m"))
for file in ["test_multiple_inheritance_ml"]
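The test_files list is presumably consumed by a parametrized test further down this file; a hedged sketch of that shape (the test name and the parse_file call are assumptions, since the remainder of the file is truncated in this diff):

    import pytest

    @pytest.mark.parametrize("filename", test_files)
    def test_matlab_tokens(filename: str):
        element = parser.parse_file(filename)   # assumed parser entry point
        assert element is not None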
