[DPE-5324] Increase linting rules #740

Merged
merged 23 commits into from Dec 19, 2024
Changes from 9 commits
171 changes: 87 additions & 84 deletions lib/charms/postgresql_k8s/v0/postgresql.py

Large diffs are not rendered by default.

7 changes: 2 additions & 5 deletions lib/charms/postgresql_k8s/v0/postgresql_tls.py
@@ -44,7 +44,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version.
LIBPATCH = 9
LIBPATCH = 10

logger = logging.getLogger(__name__)
SCOPE = "unit"
@@ -81,10 +81,7 @@ def _on_set_tls_private_key(self, event: ActionEvent) -> None:

def _request_certificate(self, param: Optional[str]):
"""Request a certificate to TLS Certificates Operator."""
if param is None:
key = generate_private_key()
else:
key = self._parse_tls_file(param)
key = generate_private_key() if param is None else self._parse_tls_file(param)

csr = generate_csr(
private_key=key,
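Note (not part of the diff): the if/else collapse above is the shape ruff's SIM108 rule (flake8-simplify) pushes toward — a conditional assignment becomes a single ternary expression. A minimal, self-contained sketch with placeholder stand-ins for the charm helpers:

    def generate():          # stand-in for generate_private_key()
        return "generated-key"

    def parse(param):        # stand-in for self._parse_tls_file(param)
        return f"parsed:{param}"

    def load_key(param=None):
        # SIM108: the four-line `if param is None: ... else: ...` assignment
        # becomes one conditional expression:
        return generate() if param is None else parse(param)

    print(load_key(), load_key("key.pem"))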
11 changes: 9 additions & 2 deletions pyproject.toml
@@ -111,7 +111,7 @@ line-length = 99

[tool.ruff.lint]
explicit-preview-rules = true
select = ["A", "E", "W", "F", "C", "N", "D", "I001", "CPY001"]
select = ["A", "E", "W", "F", "C", "N", "D", "I001", "B", "CPY", "RUF", "S", "SIM", "UP", "TCH"]
extend-ignore = [
"D203",
"D204",
@@ -130,12 +130,19 @@ extend-ignore = [
ignore = ["E501", "D107"]

[tool.ruff.lint.per-file-ignores]
"tests/*" = ["D100", "D101", "D102", "D103", "D104"]
"tests/*" = [
"D100", "D101", "D102", "D103", "D104",
# Asserts
"B011",
# Disable security checks for tests
"S",
]

[tool.ruff.lint.flake8-copyright]
# Check for properly formatted copyright header in each file
author = "Canonical Ltd."
notice-rgx = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+"
min-file-size = 1

[tool.ruff.lint.mccabe]
max-complexity = 10
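For reference (not part of the diff): the widened select list pulls in several extra ruff rule families — B (flake8-bugbear), CPY (flake8-copyright), RUF (Ruff-specific rules), S (flake8-bandit security checks), SIM (flake8-simplify), UP (pyupgrade) and TCH (flake8-type-checking) — and those families drive most of the mechanical changes in the rest of this PR. A rough, illustrative sampler of the kind of code they flag (hypothetical names, not charm code):

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:                 # TCH: imports used only in annotations live here
        from pathlib import Path

    def record_unit(name, seen=[]):   # B006: mutable default argument
        seen.append(name)
        return seen

    DB_PASSWORD = "super-secret"      # S105: possible hardcoded password

    def describe(path: "Path") -> str:
        return f"path is {str(path)}" # RUF010: prefer the {path!s} conversion flag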
2 changes: 1 addition & 1 deletion src/arch_utils.py
@@ -33,7 +33,7 @@ def is_wrong_architecture() -> bool:
)
return False

with open(manifest_file_path, "r") as file:
with open(manifest_file_path) as file:
manifest = file.read()
hw_arch = os.uname().machine
if ("amd64" in manifest and hw_arch == "x86_64") or (
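The one-character change above is pyupgrade's UP015 rule: "r" is already the default mode for open(), so the explicit argument is redundant. Equivalent sketch (assuming a readable text file at the given path):

    # UP015: open(path, "r") and open(path) behave identically
    with open("/etc/hostname") as file:   # mode defaults to "r" (read, text)
        print(file.read())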
63 changes: 25 additions & 38 deletions src/backups.py
@@ -97,15 +97,16 @@ def _can_initialise_stanza(self) -> bool:
# Don't allow stanza initialisation if this unit hasn't started the database
# yet and either hasn't joined the peer relation yet or hasn't configured TLS
# yet while other unit already has TLS enabled.
if not self.charm._patroni.member_started and (
(len(self.charm._peers.data.keys()) == 2)
or (
"tls" not in self.charm.unit_peer_data
and any("tls" in unit_data for _, unit_data in self.charm._peers.data.items())
return not (
not self.charm._patroni.member_started
and (
(len(self.charm._peers.data.keys()) == 2)
or (
"tls" not in self.charm.unit_peer_data
and any("tls" in unit_data for _, unit_data in self.charm._peers.data.items())
)
Inline review comment on lines +112 to +119 (Contributor Author): Ugly
)
):
return False
return True
)

def _can_unit_perform_backup(self) -> Tuple[bool, Optional[str]]:
"""Validates whether this unit can perform a backup."""
@@ -154,11 +155,11 @@ def can_use_s3_repository(self) -> Tuple[bool, Optional[str]]:
])
if error != "":
raise Exception(error)
system_identifier_from_instance = [
system_identifier_from_instance = next(
line
for line in system_identifier_from_instance.splitlines()
if "Database system identifier" in line
][0].split(" ")[-1]
).split(" ")[-1]
system_identifier_from_stanza = str(stanza.get("db")[0]["system-id"])
if system_identifier_from_instance != system_identifier_from_stanza or stanza.get(
"name"
@@ -254,7 +255,7 @@ def _change_connectivity_to_database(self, connectivity: bool) -> None:
self.charm.update_config(is_creating_backup=True)

def _execute_command(
self, command: List[str], timeout: float = None, stream: bool = False
self, command: List[str], timeout: Optional[float] = None, stream: bool = False
) -> Tuple[Optional[str], Optional[str]]:
"""Execute a command in the workload container."""
try:
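The timeout annotation change above is ruff's RUF013 (implicit Optional): a parameter annotated as float but defaulting to None must be spelled Optional[float] under PEP 484. Sketch:

    from typing import Optional

    # RUF013 flags:  def wait(timeout: float = None) -> float: ...
    def wait(timeout: Optional[float] = None) -> float:
        return 30.0 if timeout is None else timeout

    print(wait(), wait(5.0))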
@@ -314,17 +315,7 @@ def _format_backup_list(self, backup_list) -> str:
path,
) in backup_list:
backups.append(
"{:<20s} | {:<19s} | {:<8s} | {:<20s} | {:<23s} | {:<20s} | {:<20s} | {:<8s} | {:s}".format(
backup_id,
backup_action,
backup_status,
reference,
lsn_start_stop,
start,
stop,
backup_timeline,
path,
)
f"{backup_id:<20s} | {backup_action:<19s} | {backup_status:<8s} | {reference:<20s} | {lsn_start_stop:<23s} | {start:<20s} | {stop:<20s} | {backup_timeline:<8s} | {path:s}"
)
return "\n".join(backups)
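The wide .format() call above was folded into an f-string (ruff's UP032); alignment and width specs such as :<20s move inside the braces unchanged. Sketch with made-up column values:

    backup_id, status = "2024-12-19T10:00Z", "finished"

    # before: str.format with width specs
    row = "{:<20s} | {:<8s}".format(backup_id, status)

    # after (UP032): same specs, same output
    row = f"{backup_id:<20s} | {status:<8s}"
    print(row)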

@@ -371,7 +362,7 @@ def _generate_backup_list_output(self) -> str:
backup_path,
))

for timeline, (timeline_stanza, timeline_id) in self._list_timelines().items():
for timeline, (_, timeline_id) in self._list_timelines().items():
backup_list.append((
timeline,
"restore",
@@ -613,7 +604,7 @@ def _is_primary_pgbackrest_service_running(self) -> bool:
try:
primary = self.charm._patroni.get_primary()
except (RetryError, ConnectionError) as e:
logger.error(f"failed to get primary with error {str(e)}")
logger.error(f"failed to get primary with error {e!s}")
return False

if primary is None:
@@ -631,7 +622,7 @@ def _is_primary_pgbackrest_service_running(self) -> bool:
])
except ExecError as e:
logger.warning(
f"Failed to contact pgBackRest TLS server on {primary_endpoint} with error {str(e)}"
f"Failed to contact pgBackRest TLS server on {primary_endpoint} with error {e!s}"
)
return False
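The repeated {str(e)} → {e!s} edits in this file are ruff's RUF010: inside an f-string, a conversion flag replaces an explicit str()/repr() call, with identical output. Sketch:

    err = ConnectionError("primary unreachable")

    # f"{str(err)}", f"{err!s}" and f"{err}" all render the same text
    print(f"failed to get primary with error {err!s}")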

Expand Down Expand Up @@ -722,7 +713,7 @@ def _on_create_backup_action(self, event) -> None: # noqa: C901
Model Name: {self.model.name}
Application Name: {self.model.app.name}
Unit Name: {self.charm.unit.name}
Juju Version: {str(juju_version)}
Juju Version: {juju_version!s}
"""
if not self._upload_content_to_s3(
metadata,
@@ -790,7 +781,7 @@ def _on_create_backup_action(self, event) -> None: # noqa: C901
),
s3_parameters,
)
error_message = f"Failed to backup PostgreSQL with error: {str(e)}"
error_message = f"Failed to backup PostgreSQL with error: {e!s}"
logger.error(f"Backup failed: {error_message}")
event.fail(error_message)
else:
@@ -847,7 +838,7 @@ def _on_list_backups_action(self, event) -> None:
event.set_results({"backups": formatted_list})
except ExecError as e:
logger.exception(e)
event.fail(f"Failed to list PostgreSQL backups with error: {str(e)}")
event.fail(f"Failed to list PostgreSQL backups with error: {e!s}")

def _on_restore_action(self, event): # noqa: C901
"""Request that pgBackRest restores a backup."""
@@ -866,10 +857,8 @@ def _on_restore_action(self, event): # noqa: C901
logger.info("Validating provided backup-id and restore-to-time")
backups = self._list_backups(show_failed=False)
timelines = self._list_timelines()
is_backup_id_real = backup_id and backup_id in backups.keys()
is_backup_id_timeline = (
backup_id and not is_backup_id_real and backup_id in timelines.keys()
)
is_backup_id_real = backup_id and backup_id in backups
is_backup_id_timeline = backup_id and not is_backup_id_real and backup_id in timelines
if backup_id and not is_backup_id_real and not is_backup_id_timeline:
error_message = f"Invalid backup-id: {backup_id}"
logger.error(f"Restore failed: {error_message}")
@@ -913,7 +902,7 @@ def _on_restore_action(self, event): # noqa: C901
try:
self.container.stop(self.charm._postgresql_service)
except ChangeError as e:
error_message = f"Failed to stop database service with error: {str(e)}"
error_message = f"Failed to stop database service with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
return
@@ -937,9 +926,7 @@ def _on_restore_action(self, event): # noqa: C901
except ApiError as e:
# If previous PITR restore was unsuccessful, there are no such endpoints.
if "restore-to-time" not in self.charm.app_peer_data:
error_message = (
f"Failed to remove previous cluster information with error: {str(e)}"
)
error_message = f"Failed to remove previous cluster information with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
self._restart_database()
@@ -949,7 +936,7 @@ def _on_restore_action(self, event): # noqa: C901
try:
self._empty_data_files()
except ExecError as e:
error_message = f"Failed to remove contents of the data directory with error: {str(e)}"
error_message = f"Failed to remove contents of the data directory with error: {e!s}"
logger.error(f"Restore failed: {error_message}")
event.fail(error_message)
self._restart_database()
@@ -1102,7 +1089,7 @@ def _render_pgbackrest_conf_file(self) -> bool:
)

# Open the template pgbackrest.conf file.
with open("templates/pgbackrest.conf.j2", "r") as file:
with open("templates/pgbackrest.conf.j2") as file:
template = Template(file.read())
# Render the template file with the correct values.
rendered = template.render(
45 changes: 26 additions & 19 deletions src/charm.py
@@ -141,6 +141,10 @@
PASSWORD_USERS = [*SYSTEM_USERS, "patroni"]


class CannotConnectError(Exception):
"""Cannot run smoke check on connected Database."""


@trace_charm(
tracing_endpoint="tracing_endpoint",
extra_types=(
@@ -267,7 +271,7 @@ def _pebble_log_forwarding_supported(self) -> bool:
from ops.jujuversion import JujuVersion

juju_version = JujuVersion.from_environ()
return juju_version > JujuVersion(version=str("3.3"))
return juju_version > JujuVersion(version="3.3")

def _generate_metrics_jobs(self, enable_tls: bool) -> Dict:
"""Generate spec for Prometheus scraping."""
@@ -668,7 +672,7 @@ def _on_config_changed(self, event) -> None:
)
return

def enable_disable_extensions(self, database: str = None) -> None:
def enable_disable_extensions(self, database: Optional[str] = None) -> None:
"""Enable/disable PostgreSQL extensions set through config options.

Args:
@@ -1207,11 +1211,11 @@ def _on_set_password(self, event: ActionEvent) -> None:
other_cluster_primary = self._patroni.get_primary(
alternative_endpoints=other_cluster_endpoints
)
other_cluster_primary_ip = [
other_cluster_primary_ip = next(
replication_offer_relation.data[unit].get("private-address")
for unit in replication_offer_relation.units
if unit.name.replace("/", "-") == other_cluster_primary
][0]
)
try:
self.postgresql.update_user_password(
username, password, database_host=other_cluster_primary_ip
@@ -1399,7 +1403,7 @@ def _on_update_status(self, _) -> None:
and services[0].current != ServiceStatus.ACTIVE
):
logger.warning(
"%s pebble service inactive, restarting service" % self._postgresql_service
f"{self._postgresql_service} pebble service inactive, restarting service"
)
try:
container.restart(self._postgresql_service)
@@ -1598,7 +1602,7 @@ def _remove_from_endpoints(self, endpoints: List[str]) -> None:
self._update_endpoints(endpoints_to_remove=endpoints)

def _update_endpoints(
self, endpoint_to_add: str = None, endpoints_to_remove: List[str] = None
self,
endpoint_to_add: Optional[str] = None,
endpoints_to_remove: Optional[List[str]] = None,
) -> None:
"""Update members IPs."""
# Allow leader to reset which members are part of the cluster.
@@ -1765,8 +1771,7 @@ def _restart(self, event: RunWithLock) -> None:
try:
for attempt in Retrying(wait=wait_fixed(3), stop=stop_after_delay(300)):
with attempt:
if not self._can_connect_to_postgresql:
assert False
_ = self._can_connect_to_postgresql
except Exception:
logger.exception("Unable to reconnect to postgresql")

@@ -1791,7 +1796,8 @@ def _can_connect_to_postgresql(self) -> bool:
try:
for attempt in Retrying(stop=stop_after_delay(30), wait=wait_fixed(3)):
with attempt:
assert self.postgresql.get_postgresql_timezones()
if not self.postgresql.get_postgresql_timezones():
raise CannotConnectError
except RetryError:
logger.debug("Cannot connect to database")
return False
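The assert removals in charm.py follow flake8-bugbear B011 (no assert False) and bandit S101 (asserts outside tests), since asserts are stripped when Python runs with -O. Inside the retry loops, the assert is replaced either by simply evaluating the property (any exception it raises still trips tenacity's retry) or by raising the new CannotConnectError explicitly. A minimal sketch of the second pattern, with a stand-in check function:

    from tenacity import RetryError, Retrying, stop_after_delay, wait_fixed

    class CannotConnectError(Exception):
        """Smoke check against the database failed."""

    def get_timezones():          # stand-in for postgresql.get_postgresql_timezones()
        return []                 # an empty result means the check failed

    try:
        for attempt in Retrying(stop=stop_after_delay(3), wait=wait_fixed(1)):
            with attempt:
                if not get_timezones():      # was: assert get_timezones()
                    raise CannotConnectError
    except RetryError:
        print("Cannot connect to database")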
@@ -1862,16 +1868,17 @@ def update_config(self, is_creating_backup: bool = False) -> bool:
# Restart the monitoring service if the password was rotated
container = self.unit.get_container("postgresql")
current_layer = container.get_plan()
if metrics_service := current_layer.services[self._metrics_service]:
if not metrics_service.environment.get("DATA_SOURCE_NAME", "").startswith(
f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
):
container.add_layer(
self._metrics_service,
Layer({"services": {self._metrics_service: self._generate_metrics_service()}}),
combine=True,
)
container.restart(self._metrics_service)
if (
metrics_service := current_layer.services[self._metrics_service]
) and not metrics_service.environment.get("DATA_SOURCE_NAME", "").startswith(
f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
):
container.add_layer(
self._metrics_service,
Layer({"services": {self._metrics_service: self._generate_metrics_service()}}),
combine=True,
)
container.restart(self._metrics_service)

return True

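The update_config change above collapses a nested if into one condition (ruff's SIM102, collapsible-if) while keeping the walrus assignment. A self-contained sketch with invented service data:

    from types import SimpleNamespace

    current_services = {"metrics": SimpleNamespace(environment={"DATA_SOURCE_NAME": "user=old"})}

    def restart(name):
        print(f"restarting {name}")

    # before (SIM102): an if nested directly inside another if
    #   if (svc := current_services.get("metrics")):
    #       if not svc.environment.get("DATA_SOURCE_NAME", "").startswith("user=monitoring"):
    #           restart("metrics")

    # after: the two conditions joined with `and`
    if (svc := current_services.get("metrics")) and not svc.environment.get(
        "DATA_SOURCE_NAME", ""
    ).startswith("user=monitoring"):
        restart("metrics")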
19 changes: 10 additions & 9 deletions src/constants.py
@@ -7,17 +7,12 @@
PEER = "database-peers"
BACKUP_USER = "backup"
REPLICATION_USER = "replication"
REPLICATION_PASSWORD_KEY = "replication-password"
REWIND_USER = "rewind"
REWIND_PASSWORD_KEY = "rewind-password"
MONITORING_USER = "monitoring"
MONITORING_PASSWORD_KEY = "monitoring-password"
PATRONI_PASSWORD_KEY = "patroni-password"
TLS_KEY_FILE = "key.pem"
TLS_CA_FILE = "ca.pem"
TLS_CERT_FILE = "cert.pem"
USER = "operator"
USER_PASSWORD_KEY = "operator-password"
WORKLOAD_OS_GROUP = "postgres"
WORKLOAD_OS_USER = "postgres"
METRICS_PORT = "9187"
@@ -32,10 +27,16 @@
# List of system usernames needed for correct work of the charm/workload.
SYSTEM_USERS = [BACKUP_USER, REPLICATION_USER, REWIND_USER, USER, MONITORING_USER]

SECRET_LABEL = "secret"
SECRET_CACHE_LABEL = "cache"
SECRET_INTERNAL_LABEL = "internal-secret"
SECRET_DELETED_LABEL = "None"
# Labels are not confidential
REPLICATION_PASSWORD_KEY = "replication-password" # noqa: S105
REWIND_PASSWORD_KEY = "rewind-password" # noqa: S105
MONITORING_PASSWORD_KEY = "monitoring-password" # noqa: S105
PATRONI_PASSWORD_KEY = "patroni-password" # noqa: S105
USER_PASSWORD_KEY = "operator-password" # noqa: S105
SECRET_LABEL = "secret" # noqa: S105
SECRET_CACHE_LABEL = "cache" # noqa: S105
SECRET_INTERNAL_LABEL = "internal-secret" # noqa: S105
SECRET_DELETED_LABEL = "None" # noqa: S105

APP_SCOPE = "app"
UNIT_SCOPE = "unit"
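The constants.py block above shows the practical cost of enabling the S (bandit) family: S105 keys off names that look credential-like (anything containing "password", "secret", and so on bound to a string literal), even when the value is only a peer-data key or secret label — hence the grouped "Labels are not confidential" comment and the per-line suppressions. Sketch:

    # S105 would flag this assignment purely because of the name:
    #   OPERATOR_PASSWORD = "super-secret-value"
    # In constants.py the values are just key/label strings, so each line carries a suppression:
    USER_PASSWORD_KEY = "operator-password"  # noqa: S105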