Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[MISC] Suppress oversee user action #509

Draft
wants to merge 7 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions actions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -62,3 +62,5 @@ set-tls-private-key:
private-key:
type: string
description: The content of private key for communications with clients. Content will be auto-generated if this option is not specified.
reenable-oversee-users:
description: Reenable purging of managed credentials after a standby cluster is promoted.
Comment on lines +65 to +66
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

any reason why it is not run as part of promote-to-primary action?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The original issue was that promoting the replica cluster deleted users for relations to the old primary cluster, since there were no relations for them in the new primary cluster. We can't know whether the client charms were re-related to the new primary cluster, so we can't know when it's safe to re-enable that functionality without user input.

15 changes: 15 additions & 0 deletions src/relations/async_replication.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,9 @@ def __init__(self, charm):
self.framework.observe(
self.charm.on.promote_to_primary_action, self._on_promote_to_primary
)
self.framework.observe(
self.charm.on.reenable_oversee_users_action, self._on_reenable_oversee_users
)

self.framework.observe(self.charm.on.secret_changed, self._on_secret_changed)

Expand Down Expand Up @@ -600,6 +603,18 @@ def _on_promote_to_primary(self, event: ActionEvent) -> None:
# Set the status.
self.charm.unit.status = MaintenanceStatus("Creating replication...")

def _on_reenable_oversee_users(self, event: ActionEvent) -> None:
"""Re-enable oversee users after cluster was promoted."""
if not self.charm.unit.is_leader():
event.fail("Unit is not leader")
return

if "suppress-oversee-users" not in self.charm.app_peer_data:
event.fail("Oversee users is not suppressed")
return

del self.charm.app_peer_data["suppress-oversee-users"]

def _on_secret_changed(self, event: SecretChangedEvent) -> None:
"""Update the internal secret when the relation secret changes."""
relation = self._relation
Expand Down
22 changes: 21 additions & 1 deletion tests/integration/ha_tests/test_async_replication.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import contextlib
import logging
import subprocess
from asyncio import gather
from asyncio import gather, sleep
from typing import Optional

import psycopg2
Expand Down Expand Up @@ -318,6 +318,26 @@ async def test_data_integrator_creds_keep_on_working(
finally:
connection.close()

logger.info("Re-enable oversee users")
leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME, model=second_model)
action = await leader_unit.run_action(action_name="reenable-oversee-users")
await action.wait()

async with fast_forward(second_model, FAST_INTERVAL):
await sleep(20)
await second_model.wait_for_idle(
apps=[DATABASE_APP_NAME],
status="active",
timeout=TIMEOUT,
)
try:
with psycopg2.connect(connstr) as connection:
assert False
except psycopg2.OperationalError:
logger.info("Data integrator creds purged")
finally:
connection.close()


@pytest.mark.group(1)
@markers.juju3
Expand Down
48 changes: 48 additions & 0 deletions tests/unit/test_async_replication.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
from unittest.mock import Mock

import pytest
from ops.testing import Harness

from charm import PostgresqlOperatorCharm


@pytest.fixture(autouse=True)
def harness():
    """Yield a started Harness with the upgrade and peer relations wired up.

    A peer unit "postgresql/1" is added to both relations, and its upgrade
    state is seeded to "idle" with hooks disabled so no events fire during
    setup. The harness is cleaned up after the test completes.
    """
    test_harness = Harness(PostgresqlOperatorCharm)
    test_harness.begin()
    relation_ids = [
        test_harness.add_relation("upgrade", "postgresql"),
        test_harness.add_relation("database-peers", "postgresql"),
    ]
    for relation_id in relation_ids:
        test_harness.add_relation_unit(relation_id, "postgresql/1")
    with test_harness.hooks_disabled():
        test_harness.update_relation_data(
            relation_ids[0], "postgresql/1", {"state": "idle"}
        )
    yield test_harness
    test_harness.cleanup()


def test_on_reenable_oversee_users(harness):
    """Verify the leader guard, the missing-flag guard, and the success path.

    The success path additionally asserts that the action did not fail, so a
    regression that both clears the flag and fails the action is caught.
    """
    event = Mock()

    # Fail if unit is not leader
    harness.charm.async_replication._on_reenable_oversee_users(event)

    event.fail.assert_called_once_with("Unit is not leader")
    event.fail.reset_mock()

    # Fail if peer data is not set
    with harness.hooks_disabled():
        harness.set_leader()

    harness.charm.async_replication._on_reenable_oversee_users(event)

    event.fail.assert_called_once_with("Oversee users is not suppressed")
    event.fail.reset_mock()

    # Success path: the flag is removed and the action does not fail
    with harness.hooks_disabled():
        harness.charm._peers.data[harness.charm.app].update({"suppress-oversee-users": "true"})

    harness.charm.async_replication._on_reenable_oversee_users(event)

    event.fail.assert_not_called()
    assert harness.charm._peers.data[harness.charm.app] == {}
Loading