Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Test fix for async replication tests #502

Draft
wants to merge 4 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ jobs:
- lint
- unit-test
- build
uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v16.0.0
uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@dpe-4685-multiple-models-status-and-logs
with:
artifact-prefix: ${{ needs.build.outputs.artifact-prefix }}
architecture: ${{ matrix.architecture }}
Expand Down
24 changes: 12 additions & 12 deletions src/relations/async_replication.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,12 @@ def __init__(self, charm):
super().__init__(charm, "postgresql")
self.charm = charm
self.framework.observe(
self.charm.on[REPLICATION_OFFER_RELATION].relation_joined,
self._on_async_relation_joined,
self.charm.on[REPLICATION_OFFER_RELATION].relation_created,
self._on_async_relation_created,
)
self.framework.observe(
self.charm.on[REPLICATION_CONSUMER_RELATION].relation_joined,
self._on_async_relation_joined,
self.charm.on[REPLICATION_CONSUMER_RELATION].relation_created,
self._on_async_relation_created,
)
self.framework.observe(
self.charm.on[REPLICATION_OFFER_RELATION].relation_changed,
Expand Down Expand Up @@ -481,7 +481,7 @@ def is_primary_cluster(self) -> bool:
return self.charm.app == self._get_primary_cluster()

def _on_async_relation_broken(self, _) -> None:
if "departing" in self.charm._peers.data[self.charm.unit]:
if self.charm._peers is None or "departing" in self.charm._peers.data[self.charm.unit]:
logger.debug("Early exit on_async_relation_broken: Skipping departing unit.")
return

Expand Down Expand Up @@ -546,13 +546,7 @@ def _on_async_relation_changed(self, event: RelationChangedEvent) -> None:

self._handle_database_start(event)

def _on_async_relation_departed(self, event: RelationDepartedEvent) -> None:
    """Set a flag to avoid setting a wrong status message on relation broken event handler."""
    # Record in the peer relation data that this unit is the one departing, so
    # the relation-broken handler can detect the flag and return early instead
    # of overwriting the unit status.
    # This is needed because of https://bugs.launchpad.net/juju/+bug/1979811.
    # _peers can be None before the peer relation exists — guard against that.
    if event.departing_unit == self.charm.unit and self.charm._peers is not None:
        self.charm._peers.data[self.charm.unit].update({"departing": "True"})

def _on_async_relation_joined(self, _) -> None:
def _on_async_relation_created(self, _) -> None:
"""Publish this unit address in the relation data."""
self._relation.data[self.charm.unit].update({"unit-address": self.charm._unit_ip})

Expand All @@ -563,6 +557,12 @@ def _on_async_relation_joined(self, _) -> None:
"unit-promoted-cluster-counter": highest_promoted_cluster_counter
})

def _on_async_relation_departed(self, event: RelationDepartedEvent) -> None:
    """Set a flag to avoid setting a wrong status message on relation broken event handler."""
    # Mark this unit as "departing" in the peer relation databag so that the
    # relation-broken handler can skip it (it checks for this key and exits
    # early rather than setting a misleading status).
    # This is needed because of https://bugs.launchpad.net/juju/+bug/1979811.
    # _peers may be None early in the unit lifecycle — only write when present.
    if event.departing_unit == self.charm.unit and self.charm._peers is not None:
        self.charm._peers.data[self.charm.unit].update({"departing": "True"})

def _on_create_replication(self, event: ActionEvent) -> None:
"""Set up asynchronous replication between two clusters."""
if self._get_primary_cluster() is not None:
Expand Down
8 changes: 5 additions & 3 deletions tests/integration/ha_tests/test_async_replication.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,9 +438,11 @@ async def test_async_replication_failover_in_main_cluster(
)

# Check that the sync-standby unit is not the same as before.
new_sync_standby = await get_sync_standby(ops_test, first_model, DATABASE_APP_NAME)
logger.info(f"New sync-standby: {new_sync_standby}")
assert new_sync_standby != sync_standby, "Sync-standby is the same as before"
for attempt in Retrying(stop=stop_after_delay(90), wait=wait_fixed(10), reraise=True):
with attempt:
new_sync_standby = await get_sync_standby(ops_test, first_model, DATABASE_APP_NAME)
logger.info(f"New sync-standby: {new_sync_standby}")
assert new_sync_standby != sync_standby, "Sync-standby is the same as before"

logger.info("Ensure continuous_writes after the crashed unit")
await are_writes_increasing(ops_test)
Expand Down
Loading