Merge pull request #198 from Aiven-Open/remove-python-3.9
Fixing linters
ettanany authored Jan 20, 2025
2 parents 8045f01 + c224492 commit cdceebc
Showing 10 changed files with 59 additions and 46 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/tests.yaml
@@ -83,10 +83,6 @@ jobs:
# skipping matrix items that aren't meaningful
matrix:
include:
- mysql-version: "8.0.32"
percona-version: "8.0.32-26-1.focal"
python-version: "3.9"
ubuntu-version: "20.04"
- mysql-version: "8.0.32"
percona-version: "8.0.32-26-1.focal"
python-version: "3.10"
@@ -99,6 +95,10 @@ jobs:
percona-version: "8.0.35-30-1.focal"
python-version: "3.11"
ubuntu-version: "20.04"
- mysql-version: "8.0.35"
percona-version: "8.0.35-30-1.focal"
python-version: "3.12"
ubuntu-version: "20.04"

steps:
- id: checkout-code
2 changes: 1 addition & 1 deletion README.md
@@ -135,7 +135,7 @@ backed up.

# Requirements

MyHoard requires Python 3.9 or later and some additional components to operate:
MyHoard requires Python 3.10 or later and some additional components to operate:

- [percona-xtrabackup](https://github.com/percona/percona-xtrabackup)
- [python3-PyMySQL](https://github.com/PyMySQL/PyMySQL)
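The README diff above only raises the minimum interpreter to Python 3.10. A minimal, hypothetical runtime guard mirroring that requirement (not part of MyHoard itself) could look like:

    import sys

    # MyHoard now documents Python 3.10 or later; fail fast on older interpreters.
    if sys.version_info < (3, 10):
        raise RuntimeError(
            f"Python 3.10+ is required, found {sys.version_info.major}.{sys.version_info.minor}"
        )
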
34 changes: 17 additions & 17 deletions myhoard/backup_stream.py
@@ -1,6 +1,4 @@
# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
from rohmu.errors import FileNotFoundFromStorageError

from .append_only_state_manager import AppendOnlyStateManager
from .basebackup_operation import BasebackupOperation
from .binary_io_slice import BinaryIOSlice
@@ -33,11 +31,13 @@
from rohmu import errors as rohmu_errors
from rohmu.compressor import CompressionStream
from rohmu.encryptor import EncryptorStream
from rohmu.errors import FileNotFoundFromStorageError
from rohmu.object_storage.s3 import S3Transfer
from socket import gaierror
from socks import GeneralProxyError, ProxyConnectionError
from ssl import SSLEOFError
from typing import Any, Callable, cast, Dict, Iterable, Iterator, List, Optional, Set, Tuple, TYPE_CHECKING, TypedDict

import contextlib
import enum
import json
@@ -256,6 +256,8 @@ def __init__(
"stream_id": stream_id,
"updated_at": time.time(),
"valid_local_binlog_found": False,
"number_of_splits": None,
"split_size": None,
}
self.state_manager = StateManager(lock=self.lock, state=self.state, state_file=state_file)
self.stats = stats
@@ -672,8 +674,7 @@ def upload_progress(bytes_sent):
# a separate measurement because uploads are not ongoing all the time and calculating
# rate based on raw byte counter requires knowing when the operation started and ended
self.stats.gauge_int(
"myhoard.backup_stream.basebackup_bytes_uploaded",
self.basebackup_bytes_uploaded + bytes_sent
"myhoard.backup_stream.basebackup_bytes_uploaded", self.basebackup_bytes_uploaded + bytes_sent
)
last_value[0], last_time[0] = track_rate(
current=bytes_sent,
@@ -698,10 +699,7 @@ def upload_progress(bytes_sent):

self.log.info("Uploading basebackup to %s", storage_file_name)
file_storage.store_file_object(
storage_file_name,
stream_to_use,
metadata=metadata,
upload_progress_fn=upload_progress
storage_file_name, stream_to_use, metadata=metadata, upload_progress_fn=upload_progress
)

# Unfortunately, at least for GCP, the upload_progress_fn doesn't get called for the last chunk,
@@ -714,12 +712,14 @@ def upload_progress(bytes_sent):
if not self.split_size or self.split_size < 1:
break

metadata = make_fs_metadata({
**metadata_template,
"number_of_splits": split_nr,
"split_size": self.split_size,
"basebackup_compressed_size": self.basebackup_bytes_uploaded,
})
metadata = make_fs_metadata(
{
**metadata_template,
"number_of_splits": split_nr,
"split_size": self.split_size,
"basebackup_compressed_size": self.basebackup_bytes_uploaded,
}
)
self.state_manager.update_state(basebackup_file_metadata=metadata)
self.log.info("Done uploading basebackup files, split into %d files", split_nr)

@@ -1265,9 +1265,9 @@ def _upload_binlog(self, binlog: BinlogInfo) -> bool:
index_name,
encrypt_stream,
metadata=metadata,
upload_progress_fn=self.binlog_progress_tracker.increment
if self.binlog_progress_tracker
else None,
upload_progress_fn=(
self.binlog_progress_tracker.increment if self.binlog_progress_tracker else None
),
)
self.stats.increase("myhoard.binlog.upload")
if self.file_uploaded_callback:
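Most of the backup_stream.py changes are black reformatting plus two new state keys (number_of_splits, split_size). The pattern used for upload_progress_fn above, wrapping a conditional expression passed as a keyword argument in its own parentheses, is sketched below with hypothetical names:

    def store_object(payload: bytes, *, upload_progress_fn=None) -> None:
        # hypothetical stand-in for file_storage.store_file_object
        if upload_progress_fn:
            upload_progress_fn(len(payload))

    tracker = None  # progress tracking may be disabled

    store_object(
        b"binlog-chunk",
        upload_progress_fn=(
            tracker.increment if tracker else None
        ),
    )
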
4 changes: 2 additions & 2 deletions myhoard/basebackup_operation.py
@@ -1,8 +1,8 @@
# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
from contextlib import suppress
from distutils.version import LooseVersion # pylint:disable=deprecated-module
from myhoard.errors import BlockMismatchError, XtraBackupError
from myhoard.util import get_mysql_version, mysql_cursor
from packaging.version import Version
from rohmu.util import increase_pipe_capacity, set_stream_nonblocking
from typing import Optional

@@ -156,7 +156,7 @@ def _optimize_tables(self) -> None:
params["timeout"] = CURSOR_TIMEOUT_DURING_OPTIMIZE
with mysql_cursor(**params) as cursor:
version = get_mysql_version(cursor)
if LooseVersion(version) < LooseVersion("8.0.29"):
if Version(version) < Version("8.0.29"):
return

# allow OPTIMIZE TABLE to run on tables without primary keys
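The switch from LooseVersion to packaging.version.Version matters because distutils is deprecated and removed from the standard library in Python 3.12. Version compares release segments numerically, which is what the 8.0.29 check relies on; a short sketch of the semantics:

    from packaging.version import Version

    # Release segments compare numerically, so "8.0.9" sorts before "8.0.29".
    assert Version("8.0.9") < Version("8.0.29")
    assert Version("8.0.35") >= Version("8.0.29")

    # Plain string comparison would get this wrong: lexicographically "8.0.9" > "8.0.29".
    assert "8.0.9" > "8.0.29"
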
14 changes: 7 additions & 7 deletions myhoard/binary_io_slice.py
@@ -1,6 +1,6 @@
# Copyright (c) 2024 Aiven, Helsinki, Finland. https://aiven.io/
from types import TracebackType
from typing import BinaryIO, Type, Iterable, Iterator
from typing import BinaryIO, Iterable, Iterator, Type


class MethodNotSupportedError(Exception):
@@ -14,7 +14,7 @@ def __init__(self, max_file_size: int, stream: BinaryIO):
self._size_remaining = max_file_size
self.stream = stream

def read(self, __n: int = ...) -> bytes:
def read(self, __n: int = -1) -> bytes:
if __n < 0:
to_read = self._size_remaining
else:
@@ -53,13 +53,13 @@ def isatty(self) -> bool:
def readable(self) -> bool:
return self.stream.readable()

def readline(self, __limit: int = ...) -> bytes:
def readline(self, __limit: int = -1) -> bytes:
raise MethodNotSupportedError()

def readlines(self, __hint: int = ...) -> list[bytes]:
def readlines(self, __hint: int = -1) -> list[bytes]:
raise MethodNotSupportedError()

def seek(self, __offset: int, __whence: int = ...) -> int:
def seek(self, __offset: int, __whence: int = 0) -> int:
return self.stream.seek(__offset, __whence)

def seekable(self) -> bool:
@@ -68,7 +68,7 @@ def seekable(self) -> bool:
def tell(self) -> int:
return self._max_file_size - self._size_remaining

def truncate(self, __size: int | None = ...) -> int:
def truncate(self, __size: int | None = None) -> int:
raise MethodNotSupportedError()

def writable(self) -> bool:
@@ -90,6 +90,6 @@ def __enter__(self) -> BinaryIO:
return self.stream.__enter__()

def __exit__(
self, __t: Type[BaseException] | None, __value: BaseException | None, __traceback: TracebackType | None
self, __t: Type[BaseException] | None, __value: BaseException | None, __traceback: TracebackType | None
) -> None:
return self.stream.__exit__(__t, __value, __traceback)
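The binary_io_slice.py changes replace stub-style `= ...` defaults with the concrete defaults used by BinaryIO (-1, 0, None); with Ellipsis as the default, calling read() with no argument would hit the `__n < 0` comparison and raise a TypeError at runtime. A hypothetical illustration, not the project code:

    import io

    def read_slice(stream: io.BytesIO, n: int = -1) -> bytes:
        # With a stub-style default of `...`, calling read_slice(stream) would compare
        # Ellipsis against 0 below and fail with a TypeError.
        remaining = 5
        to_read = remaining if n < 0 else min(n, remaining)
        return stream.read(to_read)

    print(read_slice(io.BytesIO(b"0123456789")))  # b'01234'
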
7 changes: 4 additions & 3 deletions myhoard/restore_coordinator.py
@@ -855,7 +855,7 @@ def _load_file_data(self, name, missing_ok=False):

def _basebackup_data_provider(self, target_stream) -> None:
compressed_size = self.state["basebackup_info"].get("compressed_size")
with (self.file_storage_pool.with_transfer(self.file_storage_config) as file_storage):
with self.file_storage_pool.with_transfer(self.file_storage_config) as file_storage:
last_time = [time.monotonic()]
last_value = [0]
self.basebackup_bytes_downloaded = 0
@@ -865,8 +865,9 @@ def _basebackup_data_provider(self, target_stream) -> None:
def download_progress(progress, max_progress):
if progress and max_progress and compressed_size:
# progress may be the actual number of bytes or it may be percentages
self.basebackup_bytes_downloaded = total_at_last_split_download[0] \
+ int(current_file_size[0] * progress / max_progress)
self.basebackup_bytes_downloaded = total_at_last_split_download[0] + int(
current_file_size[0] * progress / max_progress
)

# Track both absolute number and explicitly calculated rate. The rate can be useful as
# a separate measurement because downloads are not ongoing all the time and calculating
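The reformatted expression in _basebackup_data_provider adds the bytes from splits that have already finished downloading to a proportional share of the split currently in flight. A worked example with hypothetical numbers:

    total_at_last_split_download = 40_000  # bytes from splits that finished downloading
    current_file_size = 10_000             # size of the split currently being downloaded
    progress, max_progress = 75, 100       # the provider may report bytes or percentages

    basebackup_bytes_downloaded = total_at_last_split_download + int(
        current_file_size * progress / max_progress
    )
    assert basebackup_bytes_downloaded == 47_500
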
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -10,16 +10,16 @@ authors = [
]
description = "MyHoard is a daemon for creating, managing and restoring MySQL backups."
readme = "README.md"
requires-python = ">=3.9"
requires-python = ">=3.10"
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Software Development :: Libraries",
]
@@ -37,6 +37,7 @@ dependencies = [
"python-snappy == 0.6.1",
"rohmu >= 1.1.2",
"sentry-sdk==1.14.0",
"packaging",
]

[project.optional-dependencies]
4 changes: 2 additions & 2 deletions test/test_backup_stream.py
@@ -1,6 +1,4 @@
# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
import rohmu

from . import build_statsd_client, generate_rsa_key_pair, MySQLConfig, wait_for_condition
from myhoard.backup_stream import BackupStream
from myhoard.binlog_scanner import BinlogScanner
@@ -13,6 +11,7 @@
import myhoard.util as myhoard_util
import os
import pytest
import rohmu

pytestmark = [pytest.mark.unittest, pytest.mark.all]

@@ -150,6 +149,7 @@ def _run_backup_stream_test(session_tmpdir, mysql_master: MySQLConfig, backup_st
"directory": backup_target_location,
"storage_type": "local",
},
"split_size": None,
}
}
backups = Controller.get_backup_list(backup_sites)
4 changes: 2 additions & 2 deletions test/test_basebackup_operation.py
@@ -1,7 +1,7 @@
# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
from . import build_statsd_client, MySQLConfig, restart_mysql
from distutils.version import LooseVersion # pylint:disable=deprecated-module
from myhoard.basebackup_operation import BasebackupOperation
from packaging.version import Version
from typing import IO
from unittest import SkipTest
from unittest.mock import mock_open, patch
@@ -139,7 +139,7 @@ def stream_handler(_stream):
def test_backup_with_non_optimized_tables(mysql_master: MySQLConfig) -> None:
with myhoard_util.mysql_cursor(**mysql_master.connect_options) as cursor:
version = myhoard_util.get_mysql_version(cursor)
if LooseVersion(version) < LooseVersion("8.0.29"):
if Version(version) < Version("8.0.29"):
raise SkipTest("DB version doesn't need OPTIMIZE TABLE")

def create_test_db(*, db_name: str, table_name: str, add_pk: bool) -> None:
23 changes: 17 additions & 6 deletions test/test_restore_coordinator.py
@@ -24,7 +24,7 @@ def test_restore_coordinator(session_tmpdir, mysql_master, mysql_empty):

def test_restore_coordinator_with_split_basebackup(session_tmpdir, mysql_master, mysql_empty):
_restore_coordinator_sequence(
session_tmpdir, mysql_master, mysql_empty, pitr=False, rebuild_tables=False, fail_and_resume=False,
session_tmpdir,
mysql_master,
mysql_empty,
pitr=False,
rebuild_tables=False,
fail_and_resume=False,
split_size=10_000,
)

@@ -48,7 +53,14 @@ def test_restore_coordinator_resume_rebuild_tables(session_tmpdir, mysql_master,


def _restore_coordinator_sequence(
session_tmpdir, mysql_master, mysql_empty, *, pitr: bool, rebuild_tables: bool, fail_and_resume: bool, split_size: int=0,
session_tmpdir,
mysql_master,
mysql_empty,
*,
pitr: bool,
rebuild_tables: bool,
fail_and_resume: bool,
split_size: int = 0,
):
with myhoard_util.mysql_cursor(**mysql_master.connect_options) as cursor:
cursor.execute("CREATE DATABASE db1")
@@ -428,6 +440,7 @@ def test_empty_last_relay(running_state, session_tmpdir, mysql_master, mysql_emp


def test_restore_coordinator_check_parameter_before_restart(session_tmpdir):
# pylint: disable=W0212,W0108
restarts = []

state_file = os.path.join(session_tmpdir().strpath, "the_state_file.json")
@@ -436,10 +449,8 @@ def _register_restart(**kwargs):
restarts.append({**kwargs})

rc = RestoreCoordinator(
binlog_streams=[
],
file_storage_config={
},
binlog_streams=[],
file_storage_config={},
free_memory_percentage=80,
mysql_client_params={},
mysql_config_file_name="",
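The expanded call sites and signature in test_restore_coordinator.py follow black's magic trailing comma: when a call or signature already ends with a trailing comma, black places each argument on its own line. A hypothetical illustration:

    def configure(*, host: str, port: int, retries: int = 3) -> dict:
        # hypothetical helper, used only to demonstrate the formatting rule
        return {"host": host, "port": port, "retries": retries}

    # Without a trailing comma, black keeps a short call on one line:
    configure(host="db1", port=3306)

    # With a trailing comma after the last argument, black expands the call:
    configure(
        host="db1",
        port=3306,
        retries=5,
    )
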
