diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 165e2ce55c..3490ba8ab1 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -82,6 +82,8 @@ jobs: - password-rotation-integration - plugins-integration - tls-integration + - upgrade-integration + - upgrade-from-stable-integration agent-versions: - "2.9.45" # renovate: latest juju 2 - "3.1.5" # renovate: latest juju 3 diff --git a/src/charm.py b/src/charm.py index 6488d08e20..2cccced46f 100755 --- a/src/charm.py +++ b/src/charm.py @@ -148,6 +148,14 @@ def __init__(self, *args): log_slots=[f"{POSTGRESQL_SNAP_NAME}:logs"], ) + @property + def app_units(self) -> set[Unit]: + """The peer-related units in the application.""" + if not self._peers: + return set() + + return {self.unit, *self._peers.units} + @property def app_peer_data(self) -> Dict: """Application peer relation data object.""" @@ -946,6 +954,12 @@ def _can_start(self, event: StartEvent) -> bool: self._reboot_on_detached_storage(event) return False + # Safeguard against starting while upgrading. + if not self.upgrade.idle: + logger.debug("Defer on_start: Cluster is upgrading") + event.defer() + return False + # Doesn't try to bootstrap the cluster if it's in a blocked state # caused, for example, because a failed installation of packages. if self.is_blocked: @@ -992,6 +1006,17 @@ def _setup_exporter(self) -> None: cache = snap.SnapCache() postgres_snap = cache[POSTGRESQL_SNAP_NAME] + if ( + postgres_snap.revision + != list( + filter(lambda snap_package: snap_package[0] == POSTGRESQL_SNAP_NAME, SNAP_PACKAGES) + )[0][1]["revision"] + ): + logger.debug( + "Early exit _setup_exporter: snap was not refreshed to the right version yet" + ) + return + postgres_snap.set( { "exporter.user": MONITORING_USER, @@ -1140,11 +1165,7 @@ def _on_set_password(self, event: ActionEvent) -> None: def _on_update_status(self, _) -> None: """Update the unit status message and users list in the database.""" - if "cluster_initialised" not in self._peers.data[self.app]: - return - - if self.is_blocked: - logger.debug("on_update_status early exit: Unit is in Blocked status") + if not self._can_run_on_update_status(): return if "restoring-backup" in self.app_peer_data: @@ -1182,6 +1203,20 @@ def _on_update_status(self, _) -> None: # Restart topology observer if it is gone self._observer.start_observer() + def _can_run_on_update_status(self) -> bool: + if "cluster_initialised" not in self._peers.data[self.app]: + return False + + if not self.upgrade.idle: + logger.debug("Early exit on_update_status: upgrade in progress") + return False + + if self.is_blocked: + logger.debug("on_update_status early exit: Unit is in Blocked status") + return False + + return True + def _handle_processes_failures(self) -> bool: """Handle Patroni and PostgreSQL OS processes failures. 
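Note on the `_setup_exporter` guard added above: it compares the installed snap revision with the revision pinned in `SNAP_PACKAGES` and returns early if the snap has not been refreshed yet. Below is a minimal sketch of that lookup, assuming `SNAP_PACKAGES` is a list of `(snap_name, metadata)` tuples whose metadata carries a "revision" key; the names and values here are illustrative stand-ins, the real pin lives in constants.py and is not part of this diff.

    # Sketch only: mirrors the revision check added to _setup_exporter.
    # SNAP_PACKAGES below is a hypothetical stand-in for the pin in constants.py.
    POSTGRESQL_SNAP_NAME = "charmed-postgresql"
    SNAP_PACKAGES = [(POSTGRESQL_SNAP_NAME, {"revision": "96"})]  # assumed shape


    def expected_snap_revision(snap_name: str) -> str:
        """Return the revision pinned for the given snap name in SNAP_PACKAGES."""
        for name, metadata in SNAP_PACKAGES:
            if name == snap_name:
                return metadata["revision"]
        raise KeyError(f"{snap_name} not found in SNAP_PACKAGES")


    # Stand-in for snap.SnapCache()[POSTGRESQL_SNAP_NAME].revision on the unit.
    installed_revision = "95"
    if installed_revision != expected_snap_revision(POSTGRESQL_SNAP_NAME):
        print("snap not yet refreshed to the pinned revision; skipping exporter setup")

The early return matters during an upgrade: `_setup_exporter` can run before `_on_upgrade_granted` has refreshed the snap, and configuring the exporter against the old revision would only be redone after the refresh anyway.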
diff --git a/src/upgrade.py b/src/upgrade.py index a01abaaa9a..ef69e5ac49 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -12,12 +12,13 @@ DependencyModel, UpgradeGrantedEvent, ) -from ops.model import ActiveStatus, MaintenanceStatus, WaitingStatus +from ops.model import MaintenanceStatus, RelationDataContent, WaitingStatus from pydantic import BaseModel from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed from typing_extensions import override -from constants import SNAP_PACKAGES +from constants import APP_SCOPE, MONITORING_PASSWORD_KEY, MONITORING_USER, SNAP_PACKAGES +from utils import new_password logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class PostgreSQLDependencyModel(BaseModel): """PostgreSQL dependencies model.""" charm: DependencyModel + snap: DependencyModel def get_postgresql_dependencies_model() -> PostgreSQLDependencyModel: @@ -42,6 +44,7 @@ def __init__(self, charm, model: BaseModel, **kwargs) -> None: """Initialize the class.""" super().__init__(charm, model, **kwargs) self.charm = charm + self._on_upgrade_charm_check_legacy() @override def build_upgrade_stack(self) -> List[int]: @@ -77,9 +80,56 @@ def log_rollback_instructions(self) -> None: "Run `juju refresh --revision postgresql` to initiate the rollback" ) + def _on_upgrade_charm_check_legacy(self) -> None: + if not self.peer_relation or len(self.app_units) < len(self.charm.app_units): + logger.debug("Wait all units join the upgrade relation") + return + + if self.state: + # If state set, upgrade is supported. Just set the snap information + # in the dependencies, as it's missing in the first revisions that + # support upgrades. + dependencies = self.peer_relation.data[self.charm.app].get("dependencies") + if ( + self.charm.unit.is_leader() + and dependencies is not None + and "snap" not in json.loads(dependencies) + ): + fixed_dependencies = json.loads(dependencies) + fixed_dependencies["snap"] = { + "dependencies": {}, + "name": "charmed-postgresql", + "upgrade_supported": "^14", + "version": "14.9", + } + self.peer_relation.data[self.charm.app].update( + {"dependencies": json.dumps(fixed_dependencies)} + ) + return + + if not self.charm.unit.is_leader(): + # set ready state on non-leader units + self.unit_upgrade_data.update({"state": "ready"}) + return + + peers_state = list(filter(lambda state: state != "", self.unit_states)) + + if len(peers_state) == len(self.peer_relation.units) and ( + set(peers_state) == {"ready"} or len(peers_state) == 0 + ): + if self.charm._patroni.member_started: + # All peers have set the state to ready + self.unit_upgrade_data.update({"state": "ready"}) + self._prepare_upgrade_from_legacy() + getattr(self.on, "upgrade_charm").emit() + @override def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: # Refresh the charmed PostgreSQL snap and restart the database. + # Update the configuration. + self.charm.unit.status = MaintenanceStatus("updating configuration") + self.charm.update_config() + self.charm.unit.status = MaintenanceStatus("refreshing the snap") self.charm._install_snap_packages(packages=SNAP_PACKAGES, refresh=True) @@ -91,6 +141,14 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: self.charm._setup_exporter() self.charm.backup.start_stop_pgbackrest_service() + try: + self.charm.unit.set_workload_version( + self.charm._patroni.get_postgresql_version() or "unset" + ) + except TypeError: + # Don't fail on this, just log it. 
+ logger.warning("Failed to get PostgreSQL version") + # Wait until the database initialise. self.charm.unit.status = WaitingStatus("waiting for database initialisation") try: @@ -110,7 +168,6 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: raise Exception() self.set_unit_completed() - self.charm.unit.status = ActiveStatus() # Ensures leader gets its own relation-changed when it upgrades if self.charm.unit.is_leader(): @@ -144,3 +201,36 @@ def pre_upgrade_check(self) -> None: "a backup is being created", "wait for the backup creation to finish before starting the upgrade", ) + + def _prepare_upgrade_from_legacy(self) -> None: + """Prepare upgrade from legacy charm without upgrade support. + + Assumes run on leader unit only. + """ + logger.warning("Upgrading from unsupported version") + + # Populate app upgrade databag to allow upgrade procedure + logger.debug("Building upgrade stack") + upgrade_stack = self.build_upgrade_stack() + logger.debug(f"Upgrade stack: {upgrade_stack}") + self.upgrade_stack = upgrade_stack + logger.debug("Persisting dependencies to upgrade relation data...") + self.peer_relation.data[self.charm.app].update( + {"dependencies": json.dumps(self.dependency_model.dict())} + ) + if self.charm.get_secret(APP_SCOPE, MONITORING_PASSWORD_KEY) is None: + self.charm.set_secret(APP_SCOPE, MONITORING_PASSWORD_KEY, new_password()) + users = self.charm.postgresql.list_users() + if MONITORING_USER not in users: + # Create the monitoring user. + self.charm.postgresql.create_user( + MONITORING_USER, + self.charm.get_secret(APP_SCOPE, MONITORING_PASSWORD_KEY), + extra_user_roles="pg_monitor", + ) + self.charm.postgresql.set_up_database() + + @property + def unit_upgrade_data(self) -> RelationDataContent: + """Return the application upgrade data.""" + return self.peer_relation.data[self.charm.unit] diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py new file mode 100644 index 0000000000..2dd8cac653 --- /dev/null +++ b/tests/integration/ha_tests/test_upgrade.py @@ -0,0 +1,207 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import json +import logging +import shutil +import zipfile +from pathlib import Path +from typing import Union + +import pytest +from pytest_operator.plugin import OpsTest + +from tests.integration.ha_tests.helpers import ( + APPLICATION_NAME, + are_writes_increasing, + check_writes, + start_continuous_writes, +) +from tests.integration.helpers import ( + DATABASE_APP_NAME, + count_switchovers, + get_leader_unit, + get_primary, +) +from tests.integration.new_relations.helpers import get_application_relation_data + +logger = logging.getLogger(__name__) + +TIMEOUT = 5 * 60 + + +@pytest.mark.abort_on_fail +async def test_deploy_latest(ops_test: OpsTest) -> None: + """Simple test to ensure that the PostgreSQL and application charms get deployed.""" + await ops_test.model.deploy( + DATABASE_APP_NAME, + num_units=3, + channel="14/edge", + config={"profile": "testing"}, + ) + await ops_test.model.deploy( + APPLICATION_NAME, + num_units=1, + channel="latest/edge", + ) + logger.info("Wait for applications to become active") + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active" + ) + assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 + + +@pytest.mark.abort_on_fail +async def test_pre_upgrade_check(ops_test: OpsTest) -> None: + """Test that the pre-upgrade-check action runs successfully.""" + logger.info("Get leader unit") + leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) + assert leader_unit is not None, "No leader unit found" + + logger.info("Run pre-upgrade-check action") + action = await leader_unit.run_action("pre-upgrade-check") + await action.wait() + + +@pytest.mark.abort_on_fail +async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None: + # Start an application that continuously writes data to the database. + logger.info("starting continuous writes to the database") + await start_continuous_writes(ops_test, DATABASE_APP_NAME) + + # Check whether writes are increasing. + logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + primary_name = await get_primary(ops_test, f"{DATABASE_APP_NAME}/0") + initial_number_of_switchovers = count_switchovers(ops_test, primary_name) + + application = ops_test.model.applications[DATABASE_APP_NAME] + + logger.info("Build charm locally") + charm = await ops_test.build_charm(".") + + logger.info("Refresh the charm") + await application.refresh(path=charm) + + logger.info("Wait for upgrade to start") + await ops_test.model.block_until( + lambda: "waiting" in {unit.workload_status for unit in application.units}, + timeout=TIMEOUT, + ) + + logger.info("Wait for upgrade to complete") + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], status="active", idle_period=30, timeout=TIMEOUT + ) + + # Check whether writes are increasing. + logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + # Verify that no writes to the database were missed after stopping the writes + # (check that all the units have all the writes). 
+ logger.info("checking whether no writes were lost") + await check_writes(ops_test) + + logger.info("checking the number of switchovers") + final_number_of_switchovers = count_switchovers(ops_test, primary_name) + assert ( + final_number_of_switchovers - initial_number_of_switchovers + ) <= 2, "Number of switchovers is greater than 2" + + +@pytest.mark.abort_on_fail +async def test_fail_and_rollback(ops_test, continuous_writes) -> None: + # Start an application that continuously writes data to the database. + logger.info("starting continuous writes to the database") + await start_continuous_writes(ops_test, DATABASE_APP_NAME) + + # Check whether writes are increasing. + logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + logger.info("Get leader unit") + leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) + assert leader_unit is not None, "No leader unit found" + + logger.info("Run pre-upgrade-check action") + action = await leader_unit.run_action("pre-upgrade-check") + await action.wait() + + local_charm = await ops_test.build_charm(".") + if isinstance(local_charm, str): + filename = local_charm.split("/")[-1] + else: + filename = local_charm.name + fault_charm = Path("/tmp/", filename) + shutil.copy(local_charm, fault_charm) + + logger.info("Inject dependency fault") + await inject_dependency_fault(ops_test, DATABASE_APP_NAME, fault_charm) + + application = ops_test.model.applications[DATABASE_APP_NAME] + + logger.info("Refresh the charm") + await application.refresh(path=fault_charm) + + logger.info("Wait for upgrade to fail") + async with ops_test.fast_forward("60s"): + await ops_test.model.block_until( + lambda: "blocked" in {unit.workload_status for unit in application.units}, + timeout=TIMEOUT, + ) + + logger.info("Ensure continuous_writes while in failure state on remaining units") + await are_writes_increasing(ops_test) + + logger.info("Re-run pre-upgrade-check action") + action = await leader_unit.run_action("pre-upgrade-check") + await action.wait() + + logger.info("Re-refresh the charm") + await application.refresh(path=local_charm) + + logger.info("Wait for upgrade to start") + await ops_test.model.block_until( + lambda: "waiting" in {unit.workload_status for unit in application.units}, + timeout=TIMEOUT, + ) + + logger.info("Wait for application to recover") + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT + ) + + logger.info("Ensure continuous_writes after rollback procedure") + await are_writes_increasing(ops_test) + + # Verify that no writes to the database were missed after stopping the writes + # (check that all the units have all the writes). + logger.info("Checking whether no writes were lost") + await check_writes(ops_test) + + # Remove fault charm file. + fault_charm.unlink() + + +async def inject_dependency_fault( + ops_test: OpsTest, application_name: str, charm_file: Union[str, Path] +) -> None: + """Inject a dependency fault into the PostgreSQL charm.""" + # Query running dependency to overwrite with incompatible version. 
+ dependencies = await get_application_relation_data( + ops_test, application_name, "upgrade", "dependencies" + ) + loaded_dependency_dict = json.loads(dependencies) + if "snap" not in loaded_dependency_dict: + loaded_dependency_dict["snap"] = {"dependencies": {}, "name": "charmed-postgresql"} + loaded_dependency_dict["snap"]["upgrade_supported"] = "^15" + loaded_dependency_dict["snap"]["version"] = "15.0" + + # Overwrite dependency.json with incompatible version. + with zipfile.ZipFile(charm_file, mode="a") as charm_zip: + charm_zip.writestr("src/dependency.json", json.dumps(loaded_dependency_dict)) diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py new file mode 100644 index 0000000000..5d20fcfa50 --- /dev/null +++ b/tests/integration/ha_tests/test_upgrade_from_stable.py @@ -0,0 +1,146 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import json +import logging + +import pytest +from pytest_operator.plugin import OpsTest + +from tests.integration.ha_tests.helpers import ( + APPLICATION_NAME, + are_writes_increasing, + check_writes, + start_continuous_writes, +) +from tests.integration.helpers import ( + DATABASE_APP_NAME, + count_switchovers, + get_leader_unit, + get_primary, + remove_chown_workaround, +) + +logger = logging.getLogger(__name__) + +TIMEOUT = 5 * 60 + + +@pytest.mark.abort_on_fail +async def test_deploy_stable(ops_test: OpsTest) -> None: + """Simple test to ensure that the PostgreSQL and application charms get deployed.""" + return_code, charm_info, stderr = await ops_test.juju("info", "postgresql", "--format=json") + if return_code != 0: + raise Exception(f"failed to get charm info with error: {stderr}") + # Revisions lower than 315 have a currently broken workaround for chown. 
+ parsed_charm_info = json.loads(charm_info) + revision = ( + parsed_charm_info["channels"]["14"]["stable"][0]["revision"] + if "channels" in parsed_charm_info + else parsed_charm_info["channel-map"]["14/stable"]["revision"] + ) + logger.info(f"14/stable revision: {revision}") + if int(revision) < 315: + original_charm_name = "./postgresql.charm" + return_code, _, stderr = await ops_test.juju( + "download", + "postgresql", + "--channel=14/stable", + f"--filepath={original_charm_name}", + ) + if return_code != 0: + raise Exception( + f"failed to download charm from 14/stable channel with error: {stderr}" + ) + patched_charm_name = "./modified_postgresql.charm" + remove_chown_workaround(original_charm_name, patched_charm_name) + return_code, _, stderr = await ops_test.juju("deploy", patched_charm_name, "-n", "3") + if return_code != 0: + raise Exception(f"failed to deploy charm from 14/stable channel with error: {stderr}") + else: + await ops_test.model.deploy( + DATABASE_APP_NAME, + num_units=3, + channel="14/stable", + ) + await ops_test.model.deploy( + APPLICATION_NAME, + num_units=1, + channel="latest/edge", + ) + logger.info("Wait for applications to become active") + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active", timeout=(20 * 60) + ) + assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 + + +@pytest.mark.abort_on_fail +async def test_pre_upgrade_check(ops_test: OpsTest) -> None: + """Test that the pre-upgrade-check action runs successfully.""" + application = ops_test.model.applications[DATABASE_APP_NAME] + if "pre-upgrade-check" not in await application.get_actions(): + logger.info("skipping the test because the charm from 14/stable doesn't support upgrade") + return + + logger.info("Get leader unit") + leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) + assert leader_unit is not None, "No leader unit found" + + logger.info("Run pre-upgrade-check action") + action = await leader_unit.run_action("pre-upgrade-check") + await action.wait() + + +@pytest.mark.abort_on_fail +async def test_upgrade_from_stable(ops_test: OpsTest): + """Test updating from stable channel.""" + # Start an application that continuously writes data to the database. + logger.info("starting continuous writes to the database") + await start_continuous_writes(ops_test, DATABASE_APP_NAME) + + # Check whether writes are increasing. + logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + primary_name = await get_primary(ops_test, f"{DATABASE_APP_NAME}/0") + initial_number_of_switchovers = count_switchovers(ops_test, primary_name) + + application = ops_test.model.applications[DATABASE_APP_NAME] + actions = await application.get_actions() + + logger.info("Build charm locally") + charm = await ops_test.build_charm(".") + + logger.info("Refresh the charm") + await application.refresh(path=charm) + + logger.info("Wait for upgrade to start") + await ops_test.model.block_until( + lambda: ("waiting" if "pre-upgrade-check" in actions else "maintenance") + in {unit.workload_status for unit in application.units}, + timeout=TIMEOUT, + ) + + logger.info("Wait for upgrade to complete") + async with ops_test.fast_forward("60s"): + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], status="active", idle_period=30, timeout=TIMEOUT + ) + + # Check whether writes are increasing. 
+ logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + # Verify that no writes to the database were missed after stopping the writes + # (check that all the units have all the writes). + logger.info("checking whether no writes were lost") + await check_writes(ops_test) + + # Check the number of switchovers. + if "pre-upgrade-check" in actions: + logger.info("checking the number of switchovers") + final_number_of_switchovers = count_switchovers(ops_test, primary_name) + assert ( + final_number_of_switchovers - initial_number_of_switchovers + ) <= 2, "Number of switchovers is greater than 2" diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 7ec9adefbc..b704da8a3c 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -241,6 +241,13 @@ def convert_records_to_dict(records: List[tuple]) -> dict: return records_dict +def count_switchovers(ops_test: OpsTest, unit_name: str) -> int: + """Return the number of performed switchovers.""" + unit_address = get_unit_address(ops_test, unit_name) + switchover_history_info = requests.get(f"http://{unit_address}:8008/history") + return len(switchover_history_info.json()) + + def db_connect(host: str, password: str) -> psycopg2.extensions.connection: """Returns psycopg2 connection object linked to postgres db in the given host. @@ -559,6 +566,16 @@ async def get_landscape_api_credentials(ops_test: OpsTest) -> List[str]: return output +async def get_leader_unit(ops_test: OpsTest, app: str) -> Optional[Unit]: + leader_unit = None + for unit in ops_test.model.applications[app].units: + if await unit.is_leader_from_status(): + leader_unit = unit + break + + return leader_unit + + async def get_machine_from_unit(ops_test: OpsTest, unit_name: str) -> str: """Get the name of the machine from a specific unit. @@ -755,6 +772,43 @@ async def check_tls_patroni_api(ops_test: OpsTest, unit_name: str, enabled: bool return False +def remove_chown_workaround(original_charm_filename: str, patched_charm_filename: str) -> None: + """Remove the chown workaround from the charm.""" + with zipfile.ZipFile(original_charm_filename, "r") as charm_file, zipfile.ZipFile( + patched_charm_filename, "w" + ) as modified_charm_file: + # Iterate the input files + unix_attributes = {} + for charm_info in charm_file.infolist(): + # Read input file + with charm_file.open(charm_info) as file: + if charm_info.filename == "src/charm.py": + content = file.read() + # Modify the content of the file by replacing a string + content = ( + content.decode() + .replace( + """ try: + self._patch_snap_seccomp_profile() + except subprocess.CalledProcessError as e: + logger.exception(e) + self.unit.status = BlockedStatus("failed to patch snap seccomp profile") + return""", + "", + ) + .encode() + ) + # Write content. + modified_charm_file.writestr(charm_info.filename, content) + else: # Other file, don't want to modify => just copy it. 
+ content = file.read() + modified_charm_file.writestr(charm_info.filename, content) + unix_attributes[charm_info.filename] = charm_info.external_attr >> 16 + + for modified_charm_info in modified_charm_file.infolist(): + modified_charm_info.external_attr = unix_attributes[modified_charm_info.filename] << 16 + + @retry( retry=retry_if_result(lambda x: not x), stop=stop_after_attempt(10), diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index b767e613c5..00b19f1c80 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -318,6 +318,7 @@ def test_enable_disable_extensions(self): @patch("charm.PostgresqlOperatorCharm._replication_password") @patch("charm.PostgresqlOperatorCharm._get_password") @patch("charm.PostgresqlOperatorCharm._reboot_on_detached_storage") + @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) @patch( "charm.PostgresqlOperatorCharm._is_storage_attached", side_effect=[False, True, True, True, True], @@ -325,6 +326,7 @@ def test_enable_disable_extensions(self): def test_on_start( self, _is_storage_attached, + _idle, _reboot_on_detached_storage, _get_password, _replication_password, @@ -402,6 +404,7 @@ def test_on_start( @patch.object(EventBase, "defer") @patch("charm.PostgresqlOperatorCharm._replication_password") @patch("charm.PostgresqlOperatorCharm._get_password") + @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) @patch( "charm.PostgresqlOperatorCharm._is_storage_attached", return_value=True, @@ -409,6 +412,7 @@ def test_on_start( def test_on_start_replica( self, _is_storage_attached, + _idle, _get_password, _replication_password, _defer, @@ -457,10 +461,12 @@ def test_on_start_replica( @patch("charm.PostgresqlOperatorCharm.postgresql") @patch("charm.Patroni") @patch("charm.PostgresqlOperatorCharm._get_password") + @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) @patch("charm.PostgresqlOperatorCharm._is_storage_attached", return_value=True) def test_on_start_no_patroni_member( self, _is_storage_attached, + _idle, _get_password, patroni, _postgresql, @@ -611,8 +617,10 @@ def test_on_set_password( new_callable=PropertyMock(return_value=True), ) @patch("charm.PostgreSQLProvider.oversee_users") + @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) def test_on_update_status( self, + _, _oversee_users, _primary_endpoint, _update_relation_endpoints, @@ -682,8 +690,10 @@ def test_on_update_status( @patch("charm.PostgresqlOperatorCharm.update_config") @patch("charm.Patroni.member_started", new_callable=PropertyMock) @patch("charm.Patroni.get_member_status") + @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) def test_on_update_status_after_restore_operation( self, + _, _get_member_status, _member_started, _update_config, @@ -694,7 +704,7 @@ def test_on_update_status_after_restore_operation( _update_relation_endpoints, _handle_workload_failures, _set_primary_status_message, - _, + __, ): # Test when the restore operation fails. 
with self.harness.hooks_disabled(): diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 9402ad1295..dd584f2899 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -62,8 +62,10 @@ def test_log_rollback(self, mock_logging, _update_config): @patch("charm.PostgresqlOperatorCharm._setup_exporter") @patch("charm.Patroni.start_patroni") @patch("charm.PostgresqlOperatorCharm._install_snap_packages") + @patch("charm.PostgresqlOperatorCharm.update_config") def test_on_upgrade_granted( self, + _update_config, _install_snap_packages, _start_patroni, _setup_exporter, @@ -80,6 +82,7 @@ def test_on_upgrade_granted( mock_event = MagicMock() _start_patroni.return_value = False self.charm.upgrade._on_upgrade_granted(mock_event) + _update_config.assert_called_once() _install_snap_packages.assert_called_once_with(packages=SNAP_PACKAGES, refresh=True) _member_started.assert_not_called() mock_event.defer.assert_not_called() diff --git a/tox.ini b/tox.ini index 3e27dcee39..5f9dc1aefd 100644 --- a/tox.ini +++ b/tox.ini @@ -182,6 +182,28 @@ commands = pip install juju=={env:LIBJUJU} poetry run pytest -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tests_path}/integration/test_tls.py +[testenv:upgrade-integration-{juju2, juju3}] +description = Run upgrade integration tests +pass_env = + {[testenv]pass_env} + CI + CI_PACKED_CHARMS +commands = + poetry install --with integration + pip install juju=={env:LIBJUJU} + poetry run pytest -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tests_path}/integration/ha_tests/test_upgrade.py + +[testenv:upgrade-from-stable-integration-{juju2, juju3}] +description = Run upgrade from stable integration tests +pass_env = + {[testenv]pass_env} + CI + CI_PACKED_CHARMS +commands = + poetry install --with integration + pip install juju=={env:LIBJUJU} + poetry run pytest -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tests_path}/integration/ha_tests/test_upgrade_from_stable.py + [testenv:integration-{juju2, juju3}] description = Run all integration tests pass_env =