From b7e3cec626c0a278deb7a40aa2bb5c88cfde30b3 Mon Sep 17 00:00:00 2001
From: Mike Alfare
Date: Thu, 25 Apr 2024 22:39:34 -0400
Subject: [PATCH] make updates from running linters and typecheckers

---
 dbt/adapters/spark/__init__.py    | 2 +-
 dbt/adapters/spark/column.py      | 2 +-
 dbt/adapters/spark/connections.py | 6 +++---
 dbt/adapters/spark/impl.py        | 2 +-
 tests/unit/utils.py               | 1 +
 5 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/dbt/adapters/spark/__init__.py b/dbt/adapters/spark/__init__.py
index c25ba40d5..6ecc5eccf 100644
--- a/dbt/adapters/spark/__init__.py
+++ b/dbt/adapters/spark/__init__.py
@@ -8,5 +8,5 @@ from dbt.include import spark
 
 Plugin = AdapterPlugin(
-    adapter=SparkAdapter, credentials=SparkCredentials, include_path=spark.PACKAGE_PATH  # type: ignore
+    adapter=SparkAdapter, credentials=SparkCredentials, include_path=spark.PACKAGE_PATH
 )
 
diff --git a/dbt/adapters/spark/column.py b/dbt/adapters/spark/column.py
index 39f6f529e..98fa24a17 100644
--- a/dbt/adapters/spark/column.py
+++ b/dbt/adapters/spark/column.py
@@ -21,7 +21,7 @@ class SparkColumn(dbtClassMixin, Column):
     def translate_type(cls, dtype: str) -> str:
         return dtype
 
-    def can_expand_to(self: Self, other_column: Self) -> bool:  # type: ignore
+    def can_expand_to(self: Self, other_column: Self) -> bool:
         """returns True if both columns are strings"""
         return self.is_string() and other_column.is_string()
 
diff --git a/dbt/adapters/spark/connections.py b/dbt/adapters/spark/connections.py
index 83048f921..0405eaf5b 100644
--- a/dbt/adapters/spark/connections.py
+++ b/dbt/adapters/spark/connections.py
@@ -65,9 +65,9 @@ class SparkConnectionMethod(StrEnum):
 @dataclass
 class SparkCredentials(Credentials):
     host: Optional[str] = None
-    schema: Optional[str] = None  # type: ignore
+    schema: Optional[str] = None
     method: SparkConnectionMethod = None  # type: ignore
-    database: Optional[str] = None  # type: ignore
+    database: Optional[str] = None
     driver: Optional[str] = None
     cluster: Optional[str] = None
     endpoint: Optional[str] = None
@@ -568,7 +568,7 @@ def open(cls, connection: Connection) -> Connection:
         return connection
 
     @classmethod
-    def data_type_code_to_name(cls, type_code: Union[type, str]) -> str:  # type: ignore
+    def data_type_code_to_name(cls, type_code: Union[type, str]) -> str:
         """
         :param Union[type, str] type_code: The sql to execute.
             * type_code is a python type (!) in pyodbc https://github.com/mkleehammer/pyodbc/wiki/Cursor#description, and a string for other spark runtimes.
diff --git a/dbt/adapters/spark/impl.py b/dbt/adapters/spark/impl.py
index 9a1a7ec06..255ab7806 100644
--- a/dbt/adapters/spark/impl.py
+++ b/dbt/adapters/spark/impl.py
@@ -151,7 +151,7 @@ def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
     def convert_datetime_type(cls, agate_table: agate.Table, col_idx: int) -> str:
         return "timestamp"
 
-    def quote(self, identifier: str) -> str:  # type: ignore
+    def quote(self, identifier: str) -> str:
         return "`{}`".format(identifier)
 
     def _get_relation_information(self, row: agate.Row) -> RelationInfo:
diff --git a/tests/unit/utils.py b/tests/unit/utils.py
index 17cd3ee78..d080242cc 100644
--- a/tests/unit/utils.py
+++ b/tests/unit/utils.py
@@ -2,6 +2,7 @@
 Note that all imports should be inside the functions to avoid import/mocking
 issues.
 """
+
 import string
 import os
 from unittest import mock
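
Notes on the hunks above:

column.py: can_expand_to could drop its ignore presumably because the
typechecker now accepts the Self annotation on the overridden method. Below
is a minimal runnable sketch of the behavior the method implements;
DemoColumn is illustrative, not the adapter's real SparkColumn, its
is_string() is a stand-in for the real dtype check, and Self is assumed here
to come from typing_extensions (typing.Self works on Python 3.11+):

    from dataclasses import dataclass
    from typing_extensions import Self

    @dataclass
    class DemoColumn:
        column: str
        dtype: str

        def is_string(self) -> bool:
            # stand-in for the real check: only string dtypes can expand
            return self.dtype == "string"

        def can_expand_to(self: Self, other_column: Self) -> bool:
            """returns True if both columns are strings"""
            return self.is_string() and other_column.is_string()

    # two string columns can expand into one another; mixed types cannot
    assert DemoColumn("a", "string").can_expand_to(DemoColumn("b", "string"))
    assert not DemoColumn("a", "string").can_expand_to(DemoColumn("b", "bigint"))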
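
connections.py: the ignores on schema and database could go, presumably
because the base Credentials class now declares those fields as Optional,
while method keeps its ignore: None is not a valid SparkConnectionMethod, so
that default still fails the typecheck. A self-contained sketch of that
distinction follows; DemoCredentials, Method, and the "analytics" value are
illustrative, not dbt's real classes:

    from dataclasses import dataclass
    from enum import Enum
    from typing import Optional

    class Method(Enum):
        THRIFT = "thrift"
        HTTP = "http"

    @dataclass
    class DemoCredentials:
        host: Optional[str] = None
        # Optional[str] admits None, so no ignore is needed here
        schema: Optional[str] = None
        # None is not a Method, so this default still needs the ignore
        method: Method = None  # type: ignore

    creds = DemoCredentials(schema="analytics")  # hypothetical value
    print(creds.method)  # None until a real method is supplied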
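
connections.py: the docstring on data_type_code_to_name explains the
Union[type, str] parameter: pyodbc reports a column's type_code as a Python
type in cursor.description, while other Spark runtimes hand back a string.
A sketch of the normalization such a method performs; this illustrates the
contract from the docstring, not the adapter's actual method body, and
type_code_to_name is a hypothetical standalone name:

    from typing import Union

    def type_code_to_name(type_code: Union[type, str]) -> str:
        # pyodbc: type_code is a Python type such as str or int
        if isinstance(type_code, type):
            return type_code.__name__
        # other runtimes already return a string name
        return type_code

    print(type_code_to_name(int))        # int
    print(type_code_to_name("DECIMAL"))  # DECIMAL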
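
impl.py: quote wraps identifiers in backticks, Spark SQL's quoting character,
where the base dbt adapter uses ANSI double quotes; that mismatch with the
base declaration is presumably why the override once carried an ignore. The
method's behavior as a standalone sketch:

    def quote(identifier: str) -> str:
        # Spark SQL quotes identifiers with backticks, not double quotes
        return "`{}`".format(identifier)

    print(quote("my_table"))  # `my_table`
    print(quote("order"))     # `order` -- safe even for reserved words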