
Commit

Lock file maintenance Python dependencies (#752)
* Lock file maintenance Python dependencies

* Fix linting

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Dragomir Penev <[email protected]>
renovate[bot] and dragomirp authored Oct 30, 2024
1 parent dfab630 commit 96f5ac5
Showing 21 changed files with 176 additions and 174 deletions.
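Most of the source changes in this commit are one and the same linting fix: f-strings that used single quotes outside and double quotes inside are flipped to double quotes outside and single quotes inside, the style enforced after the ruff bump below. A minimal before/after sketch of that pattern (values are illustrative, not taken from the repository):

fields = ["a", "b"]
old_style = f'prefix {" ".join(fields)}'   # single quotes outside, double quotes inside
new_style = f"prefix {' '.join(fields)}"   # double quotes outside, single quotes inside
assert old_style == new_style == "prefix a b"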
6 changes: 3 additions & 3 deletions lib/charms/postgresql_k8s/v0/postgresql.py
@@ -36,7 +36,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
- LIBPATCH = 37
+ LIBPATCH = 38

INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles"

@@ -244,7 +244,7 @@ def create_user(
privilege for privilege in privileges if privilege not in valid_privileges
]
if len(invalid_privileges) > 0:
- logger.error(f'Invalid extra user roles: {", ".join(privileges)}')
+ logger.error(f"Invalid extra user roles: {', '.join(privileges)}")
raise PostgreSQLCreateUserError(INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE)

with self._connect_to_database() as connection, connection.cursor() as cursor:
@@ -256,7 +256,7 @@ def create_user(
user_definition = "CREATE ROLE {}"
user_definition += f"WITH {'NOLOGIN' if user == 'admin' else 'LOGIN'}{' SUPERUSER' if admin else ''} ENCRYPTED PASSWORD '{password}'{'IN ROLE admin CREATEDB' if admin_role else ''}"
if privileges:
- user_definition += f' {" ".join(privileges)}'
+ user_definition += f" {' '.join(privileges)}"
cursor.execute(sql.SQL("BEGIN;"))
cursor.execute(sql.SQL("SET LOCAL log_statement = 'none';"))
cursor.execute(sql.SQL(f"{user_definition};").format(sql.Identifier(user)))
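The create_user hunks above build the role statement as a string and pass the role name through psycopg2's sql.Identifier before execution. A minimal sketch of that composition pattern, assuming psycopg2 is installed (the role name and statement are illustrative, not the charm's exact query):

from psycopg2 import sql

user = "backup_user"  # illustrative role name
user_definition = "CREATE ROLE {} WITH LOGIN"
query = sql.SQL(f"{user_definition};").format(sql.Identifier(user))
# With an open connection this composed object would be run as cursor.execute(query);
# sql.Identifier quotes the role name so it is inserted as an identifier, not raw text.
print(query)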
60 changes: 30 additions & 30 deletions poetry.lock

(Generated file; diff not rendered.)

6 changes: 3 additions & 3 deletions pyproject.toml
@@ -8,7 +8,7 @@ package-mode = false
python = "^3.10"
ops = "^2.17.0"
cryptography = "^43.0.3"
- boto3 = "^1.35.47"
+ boto3 = "^1.35.50"
pgconnstr = "^1.0.1"
requests = "^2.32.3"
tenacity = "^9.0.0"
@@ -39,7 +39,7 @@ opentelemetry-exporter-otlp-proto-http = "1.21.0"
optional = true

[tool.poetry.group.format.dependencies]
- ruff = "^0.7.0"
+ ruff = "^0.7.1"

[tool.poetry.group.lint]
optional = true
@@ -72,7 +72,7 @@ allure-pytest-collection-report = {git = "https://github.com/canonical/data-plat
# renovate caret doesn't work: https://github.com/renovatebot/renovate/issues/26940
juju = "<=3.5.2.0"
psycopg2-binary = "^2.9.10"
- boto3 = "^1.35.47"
+ boto3 = "^1.35.50"
tenacity = "^9.0.0"
allure-pytest = "^2.13.5"

16 changes: 8 additions & 8 deletions src/backups.py
@@ -177,7 +177,7 @@ def can_use_s3_repository(self) -> Tuple[bool, Optional[str]]:
return False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE

system_identifier_from_instance, error = self._execute_command([
- f'/usr/lib/postgresql/{self.charm._patroni.rock_postgresql_version.split(".")[0]}/bin/pg_controldata',
+ f"/usr/lib/postgresql/{self.charm._patroni.rock_postgresql_version.split('.')[0]}/bin/pg_controldata",
"/var/lib/postgresql/data/pgdata",
])
if error != "":
@@ -214,7 +214,7 @@ def _construct_endpoint(self, s3_parameters: Dict) -> str:

# Use the built endpoint if it is an AWS endpoint.
if endpoint_data and endpoint.endswith(endpoint_data["dnsSuffix"]):
- endpoint = f'{endpoint.split("://")[0]}://{endpoint_data["hostname"]}'
+ endpoint = f"{endpoint.split('://')[0]}://{endpoint_data['hostname']}"

return endpoint

@@ -366,7 +366,7 @@ def _generate_backup_list_output(self) -> str:
backup_reference = "None"
if backup["reference"]:
backup_reference, _ = self._parse_backup_id(backup["reference"][-1])
- lsn_start_stop = f'{backup["lsn"]["start"]} / {backup["lsn"]["stop"]}'
+ lsn_start_stop = f"{backup['lsn']['start']} / {backup['lsn']['stop']}"
time_start, time_stop = (
datetime.strftime(
datetime.fromtimestamp(stamp, timezone.utc), "%Y-%m-%dT%H:%M:%SZ"
@@ -378,7 +378,7 @@ def _generate_backup_list_output(self) -> str:
if backup["archive"] and backup["archive"]["start"]
else ""
)
- backup_path = f'/{self.stanza_name}/{backup["label"]}'
+ backup_path = f"/{self.stanza_name}/{backup['label']}"
error = backup["error"]
backup_status = "finished"
if error:
@@ -1060,16 +1060,16 @@ def _generate_fake_backup_id(self, backup_type: str) -> str:

if last_full_backup is None:
raise TypeError("Differential backup requested but no previous full backup")
- return f'{last_full_backup}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SD")}'
+ return f"{last_full_backup}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SD')}"
if backup_type == "incremental":
backups = self._list_backups(show_failed=False, parse=False).keys()
if not backups:
raise TypeError("Incremental backup requested but no previous successful backup")
- return f'{backups[-1]}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SI")}'
+ return f"{backups[-1]}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SI')}"

def _fetch_backup_from_id(self, backup_id: str) -> str:
"""Fetches backup's pgbackrest label from backup id."""
- timestamp = f'{datetime.strftime(datetime.strptime(backup_id, "%Y-%m-%dT%H:%M:%SZ"), "%Y%m%d-%H%M%S")}'
+ timestamp = f"{datetime.strftime(datetime.strptime(backup_id, '%Y-%m-%dT%H:%M:%SZ'), '%Y%m%d-%H%M%S')}"
backups = self._list_backups(show_failed=False, parse=False).keys()
for label in backups:
if timestamp in label:
@@ -1253,7 +1253,7 @@ def _retrieve_s3_parameters(self) -> Tuple[Dict, List[str]]:
# like Ceph Object Gateway (radosgw).
s3_parameters["endpoint"] = s3_parameters["endpoint"].rstrip("/")
s3_parameters["path"] = (
- f'/{s3_parameters["path"].strip("/")}' # The slash in the beginning is required by pgBackRest.
+ f"/{s3_parameters['path'].strip('/')}" # The slash in the beginning is required by pgBackRest.
)
s3_parameters["bucket"] = s3_parameters["bucket"].strip("/")

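The _fetch_backup_from_id hunk above converts a backup id in ISO-like form into the compact timestamp embedded in pgBackRest labels. A minimal sketch of that conversion (the backup id is an illustrative value):

from datetime import datetime

backup_id = "2024-10-30T12:34:56Z"  # illustrative backup id
timestamp = datetime.strftime(
    datetime.strptime(backup_id, "%Y-%m-%dT%H:%M:%SZ"), "%Y%m%d-%H%M%S"
)
assert timestamp == "20241030-123456"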
2 changes: 1 addition & 1 deletion src/charm.py
@@ -401,7 +401,7 @@ def postgresql(self) -> PostgreSQL:
@property
def endpoint(self) -> str:
"""Returns the endpoint of this instance's pod."""
- return f'{self._unit.replace("/", "-")}.{self._build_service_name("endpoints")}'
+ return f"{self._unit.replace('/', '-')}.{self._build_service_name('endpoints')}"

@property
def primary_endpoint(self) -> str:
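The endpoint property above turns the Juju unit name into the pod's DNS name under the charm's endpoints service. A minimal sketch of the string handling, with a hypothetical unit and service name:

unit_name = "postgresql-k8s/0"             # hypothetical unit name
service_name = "postgresql-k8s-endpoints"  # hypothetical result of _build_service_name("endpoints")
endpoint = f"{unit_name.replace('/', '-')}.{service_name}"
assert endpoint == "postgresql-k8s-0.postgresql-k8s-endpoints"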
4 changes: 2 additions & 2 deletions src/relations/async_replication.py
@@ -314,7 +314,7 @@ def get_system_identifier(self) -> Tuple[Optional[str], Optional[str]]:
try:
system_identifier, error = self.container.exec(
[
- f'/usr/lib/postgresql/{self.charm._patroni.rock_postgresql_version.split(".")[0]}/bin/pg_controldata',
+ f"/usr/lib/postgresql/{self.charm._patroni.rock_postgresql_version.split('.')[0]}/bin/pg_controldata",
POSTGRESQL_DATA_PATH,
],
user=WORKLOAD_OS_USER,
@@ -635,7 +635,7 @@ def _primary_cluster_endpoint(self) -> str:
def _re_emit_async_relation_changed_event(self) -> None:
"""Re-emit the async relation changed event."""
relation = self._relation
- getattr(self.charm.on, f'{relation.name.replace("-", "_")}_relation_changed').emit(
+ getattr(self.charm.on, f"{relation.name.replace('-', '_')}_relation_changed").emit(
relation,
app=relation.app,
unit=[unit for unit in relation.units if unit.app == relation.app][0],
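The _re_emit_async_relation_changed_event hunk above looks the event up dynamically on charm.on by translating the relation name. A minimal sketch of that translation (the relation name is illustrative):

relation_name = "replication-offer"  # illustrative relation name
event_attr = f"{relation_name.replace('-', '_')}_relation_changed"
assert event_attr == "replication_offer_relation_changed"
# In the charm this attribute is fetched with getattr(self.charm.on, event_attr) and emitted.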
(Diffs for the remaining 15 changed files are not shown here.)
