From 1ef75c3289841fc0aad4e4cc983bf773d1ba582a Mon Sep 17 00:00:00 2001 From: Dragomir Penev <6687393+dragomirp@users.noreply.github.com> Date: Sun, 16 Jun 2024 05:00:48 +0300 Subject: [PATCH] [MISC] Add codecov (#512) * Add codecov * XML export * Coverage * Changing asserts * Coverage * Change asserts * Coverage --- .github/codecov.yml | 10 + .github/workflows/ci.yaml | 4 + .gitignore | 1 + tests/unit/test_backups.py | 607 +++++++++++-------------- tests/unit/test_charm.py | 255 ++++++++++- tests/unit/test_db.py | 92 ++-- tests/unit/test_patroni.py | 46 +- tests/unit/test_postgresql.py | 213 +++++---- tests/unit/test_postgresql_provider.py | 63 +-- tests/unit/test_postgresql_tls.py | 75 ++- tests/unit/test_upgrade.py | 70 +-- tests/unit/test_utils.py | 29 +- tox.ini | 1 + 13 files changed, 812 insertions(+), 654 deletions(-) create mode 100644 .github/codecov.yml diff --git a/.github/codecov.yml b/.github/codecov.yml new file mode 100644 index 0000000000..76d0f335e1 --- /dev/null +++ b/.github/codecov.yml @@ -0,0 +1,10 @@ +github_checks: + annotations: false +coverage: + status: + project: + default: + target: 70% + patch: + default: + target: 33% diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 82cf63dbfd..2da9733745 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -37,6 +37,10 @@ jobs: pipx install poetry - name: Run tests run: tox run -e unit + - name: Upload Coverage to Codecov + uses: codecov/codecov-action@v4 + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} build: name: Build charm diff --git a/.gitignore b/.gitignore index 0d9e670c18..75610a94ec 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ build/ *.charm .tox/ .coverage +coverage.xml __pycache__/ *.py[cod] requirements.txt diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py index 19d0487d61..d41360bf15 100644 --- a/tests/unit/test_backups.py +++ b/tests/unit/test_backups.py @@ -2,7 +2,6 @@ # See LICENSE file for licensing details. import datetime from typing import OrderedDict -from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, call, mock_open, patch import pytest @@ -25,9 +24,6 @@ FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE = "failed to initialize stanza, check your S3 settings" S3_PARAMETERS_RELATION = "s3-parameters" -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -47,18 +43,15 @@ def harness(): def test_stanza_name(harness): - tc.assertEqual( - harness.charm.backup.stanza_name, - f"{harness.charm.model.name}.{harness.charm.cluster_name}", + assert ( + harness.charm.backup.stanza_name + == f"{harness.charm.model.name}.{harness.charm.cluster_name}" ) def test_tls_ca_chain_filename(harness): # Test when the TLS CA chain is not available. - tc.assertEqual( - harness.charm.backup._tls_ca_chain_filename, - "", - ) + assert harness.charm.backup._tls_ca_chain_filename == "" # Test when the TLS CA chain is available. with harness.hooks_disabled(): @@ -74,33 +67,30 @@ def test_tls_ca_chain_filename(harness): "tls-ca-chain": '["fake-tls-ca-chain"]', }, ) - tc.assertEqual( - harness.charm.backup._tls_ca_chain_filename, - "/var/lib/postgresql/data/pgbackrest-tls-ca-chain.crt", + assert ( + harness.charm.backup._tls_ca_chain_filename + == "/var/lib/postgresql/data/pgbackrest-tls-ca-chain.crt" ) def test_are_backup_settings_ok(harness): # Test without S3 relation. 
- tc.assertEqual( - harness.charm.backup._are_backup_settings_ok(), - (False, "Relation with s3-integrator charm missing, cannot create/restore backup."), + assert harness.charm.backup._are_backup_settings_ok() == ( + False, + "Relation with s3-integrator charm missing, cannot create/restore backup.", ) # Test when there are missing S3 parameters. harness.add_relation(S3_PARAMETERS_RELATION, "s3-integrator") - tc.assertEqual( - harness.charm.backup._are_backup_settings_ok(), - (False, "Missing S3 parameters: ['bucket', 'access-key', 'secret-key']"), + assert harness.charm.backup._are_backup_settings_ok() == ( + False, + "Missing S3 parameters: ['bucket', 'access-key', 'secret-key']", ) # Test when all required parameters are provided. with patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters: _retrieve_s3_parameters.return_value = ["bucket", "access-key", "secret-key"], [] - tc.assertEqual( - harness.charm.backup._are_backup_settings_ok(), - (True, None), - ) + assert harness.charm.backup._are_backup_settings_ok() == (True, None) def test_can_initialise_stanza(harness): @@ -108,10 +98,7 @@ def test_can_initialise_stanza(harness): # Test when Patroni or PostgreSQL hasn't started yet # and the unit hasn't joined the peer relation yet. _member_started.return_value = False - tc.assertEqual( - harness.charm.backup._can_initialise_stanza, - False, - ) + assert harness.charm.backup._can_initialise_stanza is False # Test when the unit hasn't configured TLS yet while other unit already has TLS enabled. harness.add_relation_unit( @@ -123,17 +110,11 @@ def test_can_initialise_stanza(harness): f"{harness.charm.app.name}/1", {"tls": "enabled"}, ) - tc.assertEqual( - harness.charm.backup._can_initialise_stanza, - False, - ) + assert harness.charm.backup._can_initialise_stanza is False # Test when everything is ok to initialise the stanza. _member_started.return_value = True - tc.assertEqual( - harness.charm.backup._can_initialise_stanza, - True, - ) + assert harness.charm.backup._can_initialise_stanza is True def test_can_unit_perform_backup(harness): @@ -148,17 +129,17 @@ def test_can_unit_perform_backup(harness): # Test when the charm fails to retrieve the primary. peer_rel_id = harness.model.get_relation(PEER).id _is_primary.side_effect = RetryError(last_attempt=1) - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Unit cannot perform backups as the database seems to be offline"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Unit cannot perform backups as the database seems to be offline", ) # Test when the unit is in a blocked state. _is_primary.side_effect = None harness.charm.unit.status = BlockedStatus("fake blocked state") - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Unit is in a blocking state"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Unit is in a blocking state", ) # Test when running the check in the primary, there are replicas and TLS is enabled. @@ -171,9 +152,9 @@ def test_can_unit_perform_backup(harness): harness.charm.unit.name, {"tls": "True"}, ) - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Unit cannot perform backups as it is the cluster primary"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Unit cannot perform backups as it is the cluster primary", ) # Test when running the check in a replica and TLS is disabled. 
@@ -184,24 +165,24 @@ def test_can_unit_perform_backup(harness): harness.charm.unit.name, {"tls": ""}, ) - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Unit cannot perform backups as TLS is not enabled"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Unit cannot perform backups as TLS is not enabled", ) # Test when Patroni or PostgreSQL hasn't started yet. _is_primary.return_value = True _member_started.return_value = False - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Unit cannot perform backups as it's not in running state"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Unit cannot perform backups as it's not in running state", ) # Test when the stanza was not initialised yet. _member_started.return_value = True - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "Stanza was not initialised"), + assert harness.charm.backup._can_unit_perform_backup() == ( + False, + "Stanza was not initialised", ) # Test when S3 parameters are not ok. @@ -212,17 +193,11 @@ def test_can_unit_perform_backup(harness): {"stanza": harness.charm.backup.stanza_name}, ) _are_backup_settings_ok.return_value = (False, "fake error message") - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (False, "fake error message"), - ) + assert harness.charm.backup._can_unit_perform_backup() == (False, "fake error message") # Test when everything is ok to run a backup. _are_backup_settings_ok.return_value = (True, None) - tc.assertEqual( - harness.charm.backup._can_unit_perform_backup(), - (True, None), - ) + assert harness.charm.backup._can_unit_perform_backup() == (True, None) def test_can_use_s3_repository(harness): @@ -247,9 +222,9 @@ def test_can_use_s3_repository(harness): # Test when nothing is returned from the pgBackRest info command. _execute_command.return_value = (None, None) - tc.assertEqual( - harness.charm.backup.can_use_s3_repository(), - (False, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE), + assert harness.charm.backup.can_use_s3_repository() == ( + False, + FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE, ) # Test when the unit is a replica. @@ -258,16 +233,12 @@ def test_can_use_s3_repository(harness): None, ) _execute_command.return_value = pgbackrest_info_same_cluster_backup_output - tc.assertEqual( - harness.charm.backup.can_use_s3_repository(), - (True, None), - ) + assert harness.charm.backup.can_use_s3_repository() == (True, None) # Assert that the stanza name is still in the unit relation data. - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": harness.charm.backup.stanza_name}, - ) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": harness.charm.backup.stanza_name + } # Test when the unit is the leader and the workload is running, # but an exception happens when retrieving the cluster system id. 
@@ -278,11 +249,16 @@ def test_can_use_s3_repository(harness):
         ]
         with harness.hooks_disabled():
             harness.set_leader()
-        with tc.assertRaises(Exception):
-            tc.assertEqual(
-                harness.charm.backup.can_use_s3_repository(),
-                (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE),
+        with pytest.raises(Exception):
+            assert harness.charm.backup.can_use_s3_repository() == (
+                False,
+                ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
             )
         _update_config.assert_not_called()
         _member_started.assert_not_called()
         _reload_patroni_configuration.assert_not_called()
@@ -306,16 +282,16 @@ def test_can_use_s3_repository(harness):
             harness.charm.app.name,
             {"stanza": harness.charm.backup.stanza_name},
         )
-        tc.assertEqual(
-            harness.charm.backup.can_use_s3_repository(),
-            (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE),
+        assert harness.charm.backup.can_use_s3_repository() == (
+            False,
+            ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
         )
         _update_config.assert_called_once()
         _member_started.assert_called_once()
         _reload_patroni_configuration.assert_called_once()

         # Assert that the stanza name is not present in the unit relation data anymore.
-        tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {})
+        assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {}

         # Test when the cluster system id can be retrieved, but it's different from the stanza system id.
         _update_config.reset_mock()
@@ -339,16 +315,16 @@ def test_can_use_s3_repository(harness):
             harness.charm.app.name,
             {"stanza": harness.charm.backup.stanza_name},
         )
-        tc.assertEqual(
-            harness.charm.backup.can_use_s3_repository(),
-            (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE),
+        assert harness.charm.backup.can_use_s3_repository() == (
+            False,
+            ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
         )
         _update_config.assert_called_once()
         _member_started.assert_called_once()
         _reload_patroni_configuration.assert_called_once()

         # Assert that the stanza name is not present in the unit relation data anymore.
-        tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {})
+        assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {}

         # Test when the workload is not running.
         _update_config.reset_mock()
@@ -365,16 +341,16 @@ def test_can_use_s3_repository(harness):
             pgbackrest_info_same_cluster_backup_output,
             other_instance_system_identifier_output,
         ]
-        tc.assertEqual(
-            harness.charm.backup.can_use_s3_repository(),
-            (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE),
+        assert harness.charm.backup.can_use_s3_repository() == (
+            False,
+            ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
         )
         _update_config.assert_called_once()
         _member_started.assert_called_once()
         _reload_patroni_configuration.assert_not_called()

         # Assert that the stanza name is not present in the unit relation data anymore.
-        tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {})
+        assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {}

         # Test when there is no backup from another cluster in the S3 repository.
         with harness.hooks_disabled():
@@ -387,36 +363,30 @@ def test_can_use_s3_repository(harness):
             pgbackrest_info_same_cluster_backup_output,
             same_instance_system_identifier_output,
         ]
-        tc.assertEqual(
-            harness.charm.backup.can_use_s3_repository(),
-            (True, None),
-        )
+        assert harness.charm.backup.can_use_s3_repository() == (True, None)

         # Assert that the stanza name is still in the unit relation data.
-        tc.assertEqual(
-            harness.get_relation_data(peer_rel_id, harness.charm.app),
-            {"stanza": harness.charm.backup.stanza_name},
-        )
+        assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {
+            "stanza": harness.charm.backup.stanza_name
+        }


 def test_construct_endpoint(harness):
     # Test with an AWS endpoint without region.
     s3_parameters = {"endpoint": "https://s3.amazonaws.com", "region": ""}
-    tc.assertEqual(
-        harness.charm.backup._construct_endpoint(s3_parameters), "https://s3.amazonaws.com"
-    )
+    assert harness.charm.backup._construct_endpoint(s3_parameters) == "https://s3.amazonaws.com"

     # Test with an AWS endpoint with region.
     s3_parameters["region"] = "us-east-1"
-    tc.assertEqual(
-        harness.charm.backup._construct_endpoint(s3_parameters),
-        "https://s3.us-east-1.amazonaws.com",
+    assert (
+        harness.charm.backup._construct_endpoint(s3_parameters)
+        == "https://s3.us-east-1.amazonaws.com"
     )

     # Test with another cloud endpoint.
     s3_parameters["endpoint"] = "https://storage.googleapis.com"
-    tc.assertEqual(
-        harness.charm.backup._construct_endpoint(s3_parameters), "https://storage.googleapis.com"
+    assert (
+        harness.charm.backup._construct_endpoint(s3_parameters) == "https://storage.googleapis.com"
     )
@@ -449,8 +419,11 @@ def test_create_bucket_if_not_exists(harness, tls_ca_chain_filename):
             [],
         )
         _resource.side_effect = ValueError
-        with tc.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             harness.charm.backup._create_bucket_if_not_exists()

         # Test when the bucket already exists.
         _resource.reset_mock()
@@ -485,8 +458,11 @@ def test_create_bucket_if_not_exists(harness, tls_ca_chain_filename):
             error_response={"Error": {"Code": 1, "message": "fake error"}},
             operation_name="fake operation name",
         )
-        with tc.assertRaises(ClientError):
+        with pytest.raises(ClientError):
             harness.charm.backup._create_bucket_if_not_exists()
         head_bucket.assert_called_once()
         create.assert_called_once()
         wait_until_exists.assert_not_called()
@@ -497,8 +473,11 @@ def test_empty_data_files(harness):
     # Test when the removal of the data files fails.
         command = "rm -r /var/lib/postgresql/data/pgdata".split()
         _exec.side_effect = ExecError(command=command, exit_code=1, stdout="", stderr="fake error")
-        with tc.assertRaises(ExecError):
+        with pytest.raises(ExecError):
             harness.charm.backup._empty_data_files()
         _exec.assert_called_once_with(command)

         # Test when data files are successfully removed.
@@ -521,19 +500,15 @@ def test_change_connectivity_to_database(harness):

         # Test when connectivity should be turned on.
         harness.charm.backup._change_connectivity_to_database(True)
-        tc.assertEqual(
-            harness.get_relation_data(peer_rel_id, harness.charm.unit),
-            {"connectivity": "on"},
-        )
+        assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {"connectivity": "on"}
         _update_config.assert_called_once()

         # Test when connectivity should be turned off.
_update_config.reset_mock() harness.charm.backup._change_connectivity_to_database(False) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.unit), - {"connectivity": "off"}, - ) + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == { + "connectivity": "off" + } _update_config.assert_called_once() @@ -556,24 +531,22 @@ def test_execute_command(harness): ), ) _exec.return_value.wait_output.return_value = ("fake stdout", "") - tc.assertEqual(harness.charm.backup._execute_command(command), (None, None)) + assert harness.charm.backup._execute_command(command) == (None, None) _exec.assert_called_once_with(command, user="postgres", group="postgres", timeout=None) # Test when the command runs successfully. _exec.reset_mock() _exec.side_effect = None - tc.assertEqual( - harness.charm.backup._execute_command(command, timeout=5), ("fake stdout", "") - ) + assert harness.charm.backup._execute_command(command, timeout=5) == ("fake stdout", "") _exec.assert_called_once_with(command, user="postgres", group="postgres", timeout=5) def test_format_backup_list(harness): # Test when there are no backups. - tc.assertEqual( - harness.charm.backup._format_backup_list([]), - """backup-id | backup-type | backup-status -----------------------------------------------------""", + assert ( + harness.charm.backup._format_backup_list([]) + == """backup-id | backup-type | backup-status +----------------------------------------------------""" ) # Test when there are backups. @@ -581,12 +554,12 @@ def test_format_backup_list(harness): ("2023-01-01T09:00:00Z", "full", "failed: fake error"), ("2023-01-01T10:00:00Z", "full", "finished"), ] - tc.assertEqual( - harness.charm.backup._format_backup_list(backup_list), - """backup-id | backup-type | backup-status + assert ( + harness.charm.backup._format_backup_list(backup_list) + == """backup-id | backup-type | backup-status ---------------------------------------------------- 2023-01-01T09:00:00Z | full | failed: fake error -2023-01-01T10:00:00Z | full | finished""", +2023-01-01T10:00:00Z | full | finished""" ) @@ -594,10 +567,10 @@ def test_generate_backup_list_output(harness): with patch("charm.PostgreSQLBackups._execute_command") as _execute_command: # Test when no backups are returned. _execute_command.return_value = ('[{"backup":[]}]', None) - tc.assertEqual( - harness.charm.backup._generate_backup_list_output(), - """backup-id | backup-type | backup-status -----------------------------------------------------""", + assert ( + harness.charm.backup._generate_backup_list_output() + == """backup-id | backup-type | backup-status +----------------------------------------------------""" ) # Test when backups are returned. @@ -605,12 +578,12 @@ def test_generate_backup_list_output(harness): '[{"backup":[{"label":"20230101-090000F","error":"fake error"},{"label":"20230101-100000F","error":null}]}]', None, ) - tc.assertEqual( - harness.charm.backup._generate_backup_list_output(), - """backup-id | backup-type | backup-status + assert ( + harness.charm.backup._generate_backup_list_output() + == """backup-id | backup-type | backup-status ---------------------------------------------------- 2023-01-01T09:00:00Z | full | failed: fake error -2023-01-01T10:00:00Z | full | finished""", +2023-01-01T10:00:00Z | full | finished""" ) @@ -618,28 +591,22 @@ def test_list_backups(harness): with patch("charm.PostgreSQLBackups._execute_command") as _execute_command: # Test when no backups are available. 
_execute_command.return_value = ("[]", None) - tc.assertEqual( - harness.charm.backup._list_backups(show_failed=True), OrderedDict[str, str]() - ) + assert harness.charm.backup._list_backups(show_failed=True) == OrderedDict[str, str]() # Test when some backups are available. _execute_command.return_value = ( '[{"backup":[{"label":"20230101-090000F","error":"fake error"},{"label":"20230101-100000F","error":null}],"name":"test-stanza"}]', None, ) - tc.assertEqual( - harness.charm.backup._list_backups(show_failed=True), - OrderedDict[str, str]([ - ("2023-01-01T09:00:00Z", "test-stanza"), - ("2023-01-01T10:00:00Z", "test-stanza"), - ]), - ) + assert harness.charm.backup._list_backups(show_failed=True) == OrderedDict[str, str]([ + ("2023-01-01T09:00:00Z", "test-stanza"), + ("2023-01-01T10:00:00Z", "test-stanza"), + ]) # Test when some backups are available, but it's not desired to list failed backups. - tc.assertEqual( - harness.charm.backup._list_backups(show_failed=False), - OrderedDict[str, str]([("2023-01-01T10:00:00Z", "test-stanza")]), - ) + assert harness.charm.backup._list_backups(show_failed=False) == OrderedDict[str, str]([ + ("2023-01-01T10:00:00Z", "test-stanza") + ]) def test_initialise_stanza(harness): @@ -685,13 +652,11 @@ def test_initialise_stanza(harness): harness.charm.unit.status = BlockedStatus(blocked_state) harness.charm.backup._initialise_stanza() _execute_command.assert_called_once_with(stanza_creation_command) - tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) - tc.assertEqual( - harness.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE - ) + assert isinstance(harness.charm.unit.status, BlockedStatus) + assert harness.charm.unit.status.message == FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE # Assert there is no stanza name in the application relation databag. - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} # Test when the archiving is working correctly (pgBackRest check command succeeds) # and the unit is not the leader. @@ -701,15 +666,12 @@ def test_initialise_stanza(harness): _reload_patroni_configuration.reset_mock() _execute_command.side_effect = None harness.charm.backup._initialise_stanza() - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.unit), - { - "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", - "init-pgbackrest": "True", - }, - ) - tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == { + "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", + "init-pgbackrest": "True", + } + assert isinstance(harness.charm.unit.status, MaintenanceStatus) # Test when the unit is the leader. 
with harness.hooks_disabled(): @@ -718,18 +680,12 @@ def test_initialise_stanza(harness): peer_rel_id, harness.charm.unit.name, {"stanza": "", "init-pgbackrest": ""} ) harness.charm.backup._initialise_stanza() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - { - "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", - "init-pgbackrest": "True", - }, - ) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.unit), - {}, - ) - tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", + "init-pgbackrest": "True", + } + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert isinstance(harness.charm.unit.status, MaintenanceStatus) def test_check_stanza(harness): @@ -776,15 +732,13 @@ def test_check_stanza(harness): ) _member_started.return_value = True harness.charm.backup.check_stanza() - tc.assertEqual(_update_config.call_count, 2) - tc.assertEqual(_member_started.call_count, 5) - tc.assertEqual(_reload_patroni_configuration.call_count, 5) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) - tc.assertEqual( - harness.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE - ) + assert _update_config.call_count == 2 + assert _member_started.call_count == 5 + assert _reload_patroni_configuration.call_count == 5 + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert isinstance(harness.charm.unit.status, BlockedStatus) + assert harness.charm.unit.status.message == FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE # Test when the archiving is working correctly (pgBackRest check command succeeds) # and the unit is not the leader. @@ -808,15 +762,14 @@ def test_check_stanza(harness): _update_config.assert_called_once() _member_started.assert_called_once() _reload_patroni_configuration.assert_called_once() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": "test-stanza", "init-pgbackrest": "True"}, - ) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.unit), - {"stanza": "test-stanza"}, - ) - tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": "test-stanza", + "init-pgbackrest": "True", + } + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == { + "stanza": "test-stanza" + } + assert isinstance(harness.charm.unit.status, ActiveStatus) # Test when the unit is the leader. 
harness.charm.unit.status = BlockedStatus("fake blocked state") @@ -839,15 +792,13 @@ def test_check_stanza(harness): _update_config.assert_called_once() _member_started.assert_called_once() _reload_patroni_configuration.assert_called_once() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": "test-stanza"}, - ) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.unit), - {"stanza": "test-stanza"}, - ) - tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": "test-stanza" + } + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == { + "stanza": "test-stanza" + } + assert isinstance(harness.charm.unit.status, ActiveStatus) def test_coordinate_stanza_fields(harness): @@ -859,9 +810,9 @@ def test_coordinate_stanza_fields(harness): # Test when the stanza name is neither in the application relation databag nor in the unit relation databag. harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == {} # Test when the stanza name is in the unit relation databag but the unit is not the leader. stanza_name = f"{harness.charm.model.name}.patroni-{harness.charm.app.name}" @@ -870,61 +821,52 @@ def test_coordinate_stanza_fields(harness): peer_rel_id, new_unit_name, {"stanza": stanza_name, "init-pgbackrest": "True"} ) harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, new_unit), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == { + "stanza": stanza_name, + "init-pgbackrest": "True", + } # Test when the unit is the leader. with harness.hooks_disabled(): harness.set_leader() harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, new_unit), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": stanza_name, + "init-pgbackrest": "True", + } + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == { + "stanza": stanza_name, + "init-pgbackrest": "True", + } # Test when the stanza was already checked in the primary non-leader unit. 
with harness.hooks_disabled(): harness.update_relation_data(peer_rel_id, new_unit_name, {"init-pgbackrest": ""}) harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": stanza_name}, - ) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {"stanza": stanza_name} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == {"stanza": stanza_name} # Test when the "init-pgbackrest" flag was removed from the application relation databag # and this is the unit that has the stanza name in the unit relation databag. with harness.hooks_disabled(): harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"stanza": stanza_name}) harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": stanza_name}, - ) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {"stanza": stanza_name} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == {"stanza": stanza_name} # Test when the unit is not the leader. with harness.hooks_disabled(): harness.set_leader(False) harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"stanza": stanza_name}) harness.charm.backup.coordinate_stanza_fields() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": stanza_name}, - ) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {"stanza": stanza_name} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} + assert harness.get_relation_data(peer_rel_id, new_unit) == {"stanza": stanza_name} def test_is_primary_pgbackrest_service_running(harness): @@ -935,13 +877,13 @@ def test_is_primary_pgbackrest_service_running(harness): ): # Test when the charm fails to get the current primary. _get_primary.side_effect = RetryError(last_attempt=1) - tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) + assert harness.charm.backup._is_primary_pgbackrest_service_running is False _execute_command.assert_not_called() # Test when the primary was not elected yet. _get_primary.side_effect = None _get_primary.return_value = None - tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) + assert harness.charm.backup._is_primary_pgbackrest_service_running is False _execute_command.assert_not_called() # Test when the pgBackRest fails to contact the primary server. 
@@ -949,13 +891,13 @@ def test_is_primary_pgbackrest_service_running(harness): _execute_command.side_effect = ExecError( command="fake command".split(), exit_code=1, stdout="", stderr="fake error" ) - tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) + assert harness.charm.backup._is_primary_pgbackrest_service_running is False _execute_command.assert_called_once() # Test when the pgBackRest succeeds on contacting the primary server. _execute_command.reset_mock() _execute_command.side_effect = None - tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, True) + assert harness.charm.backup._is_primary_pgbackrest_service_running is True _execute_command.assert_called_once() @@ -1036,7 +978,7 @@ def test_on_s3_credential_changed(harness): _render_pgbackrest_conf_file.assert_called_once() _is_primary.assert_called_once() _create_bucket_if_not_exists.assert_not_called() - tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + assert isinstance(harness.charm.unit.status, ActiveStatus) _can_use_s3_repository.assert_not_called() _initialise_stanza.assert_not_called() @@ -1058,9 +1000,9 @@ def test_on_s3_credential_changed(harness): ) _render_pgbackrest_conf_file.assert_called_once() _create_bucket_if_not_exists.assert_called_once() - tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) - tc.assertEqual( - harness.charm.unit.status.message, FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE + assert isinstance(harness.charm.unit.status, BlockedStatus) + assert ( + harness.charm.unit.status.message == FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE ) _can_use_s3_repository.assert_not_called() _initialise_stanza.assert_not_called() @@ -1072,8 +1014,8 @@ def test_on_s3_credential_changed(harness): harness.charm.backup.s3_client.on.credentials_changed.emit( relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) - tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) - tc.assertEqual(harness.charm.unit.status.message, "fake validation message") + assert isinstance(harness.charm.unit.status, BlockedStatus) + assert harness.charm.unit.status.message == "fake validation message" _create_bucket_if_not_exists.assert_called_once() _can_use_s3_repository.assert_called_once() _initialise_stanza.assert_not_called() @@ -1093,12 +1035,12 @@ def test_on_s3_credential_gone(harness): # Test that unrelated blocks will remain harness.charm.unit.status = BlockedStatus("test block") harness.charm.backup._on_s3_credential_gone(None) - tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + assert isinstance(harness.charm.unit.status, BlockedStatus) # Test that s3 related blocks will be cleared harness.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) harness.charm.backup._on_s3_credential_gone(None) - tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + assert isinstance(harness.charm.unit.status, ActiveStatus) # Test removal of relation data when the unit is not the leader. 
with harness.hooks_disabled(): @@ -1113,11 +1055,11 @@ def test_on_s3_credential_gone(harness): {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) harness.charm.backup._on_s3_credential_gone(None) - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - {"stanza": "test-stanza", "init-pgbackrest": "True"}, - ) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "stanza": "test-stanza", + "init-pgbackrest": "True", + } + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} # Test removal of relation data when the unit is the leader. with harness.hooks_disabled(): @@ -1128,8 +1070,8 @@ def test_on_s3_credential_gone(harness): {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) harness.charm.backup._on_s3_credential_gone(None) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} + assert harness.get_relation_data(peer_rel_id, harness.charm.unit) == {} def test_on_create_backup_action(harness): @@ -1285,7 +1227,7 @@ def test_on_create_backup_action(harness): mock_s3_parameters, ), ]) - tc.assertEqual(_change_connectivity_to_database.call_count, 2) + assert _change_connectivity_to_database.call_count == 2 mock_event.fail.assert_not_called() mock_event.set_results.assert_called_once_with({"backup-status": "backup created"}) @@ -1365,7 +1307,7 @@ def test_on_restore_action(harness): _start.assert_not_called() mock_event.fail.assert_not_called() mock_event.set_results.assert_not_called() - tc.assertNotIsInstance(harness.charm.unit.status, MaintenanceStatus) + assert not isinstance(harness.charm.unit.status, MaintenanceStatus) # Test when the user provides an invalid backup id. mock_event.params = {"backup-id": "2023-01-01T10:00:00Z"} @@ -1384,7 +1326,7 @@ def test_on_restore_action(harness): _update_config.assert_not_called() _start.assert_not_called() mock_event.set_results.assert_not_called() - tc.assertNotIsInstance(harness.charm.unit.status, MaintenanceStatus) + assert not isinstance(harness.charm.unit.status, MaintenanceStatus) # Test when the charm fails to stop the workload. 
mock_event.reset_mock() @@ -1420,7 +1362,7 @@ def test_on_restore_action(harness): _stop.side_effect = None _delete.side_effect = [None, _FakeApiError] harness.charm.backup._on_restore_action(mock_event) - tc.assertEqual(_delete.call_count, 2) + assert _delete.call_count == 2 mock_event.fail.assert_called_once() _restart_database.assert_called_once() _empty_data_files.assert_not_called() @@ -1450,16 +1392,13 @@ def test_on_restore_action(harness): _restart_database.reset_mock() _empty_data_files.side_effect = None _fetch_backup_from_id.return_value = "20230101-090000F" - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} harness.charm.backup._on_restore_action(mock_event) _restart_database.assert_not_called() - tc.assertEqual( - harness.get_relation_data(peer_rel_id, harness.charm.app), - { - "restoring-backup": "20230101-090000F", - "restore-stanza": f"{harness.charm.model.name}.{harness.charm.cluster_name}", - }, - ) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == { + "restoring-backup": "20230101-090000F", + "restore-stanza": f"{harness.charm.model.name}.{harness.charm.cluster_name}", + } _create_pgdata.assert_called_once() _update_config.assert_called_once() _start.assert_called_once_with("postgresql") @@ -1475,19 +1414,19 @@ def test_pre_restore_checks(harness): # Test when S3 parameters are not ok. mock_event = MagicMock() _are_backup_settings_ok.return_value = (False, "fake error message") - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when no backup id is provided. mock_event.reset_mock() _are_backup_settings_ok.return_value = (True, None) - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when the workload container is not accessible yet. mock_event.reset_mock() mock_event.params = {"backup-id": "2023-01-01T09:00:00Z"} - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when the unit is in a blocked state that is not recoverable by changing @@ -1495,7 +1434,7 @@ def test_pre_restore_checks(harness): mock_event.reset_mock() harness.set_can_connect("postgresql", True) harness.charm.unit.status = BlockedStatus("fake blocked state") - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when the unit is in a blocked state that is recoverable by changing S3 parameters, @@ -1503,20 +1442,20 @@ def test_pre_restore_checks(harness): mock_event.reset_mock() harness.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) _planned_units.return_value = 2 - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when the cluster has only one unit, but it's not the leader yet. 
mock_event.reset_mock() _planned_units.return_value = 1 - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) + assert harness.charm.backup._pre_restore_checks(mock_event) is False mock_event.fail.assert_called_once() # Test when everything is ok to run a restore. mock_event.reset_mock() with harness.hooks_disabled(): harness.set_leader() - tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), True) + assert harness.charm.backup._pre_restore_checks(mock_event) is True mock_event.fail.assert_not_called() @@ -1590,7 +1529,7 @@ def test_render_pgbackrest_conf_file(harness, tls_ca_chain_filename): harness.charm.backup._render_pgbackrest_conf_file() # Check the template is opened read-only in the call to open. - tc.assertEqual(mock.call_args_list[0][0], ("templates/pgbackrest.conf.j2", "r")) + assert mock.call_args_list[0][0] == ("templates/pgbackrest.conf.j2", "r") # Ensure the correct rendered template is sent to _render_file method. calls = [call("/etc/pgbackrest.conf", expected_content, user="postgres", group="postgres")] @@ -1619,7 +1558,7 @@ def test_restart_database(harness): harness.charm.backup._restart_database() # Assert that the backup id is not in the application relation databag anymore. - tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + assert harness.get_relation_data(peer_rel_id, harness.charm.app) == {} _update_config.assert_called_once() _start.assert_called_once_with("postgresql") @@ -1633,9 +1572,9 @@ def test_retrieve_s3_parameters( ) as _get_s3_connection_info: # Test when there are missing S3 parameters. _get_s3_connection_info.return_value = {} - tc.assertEqual( - harness.charm.backup._retrieve_s3_parameters(), - ({}, ["bucket", "access-key", "secret-key"]), + assert harness.charm.backup._retrieve_s3_parameters() == ( + {}, + ["bucket", "access-key", "secret-key"], ) # Test when only the required parameters are provided. @@ -1644,21 +1583,18 @@ def test_retrieve_s3_parameters( "access-key": "test-access-key", "secret-key": "test-secret-key", } - tc.assertEqual( - harness.charm.backup._retrieve_s3_parameters(), - ( - { - "access-key": "test-access-key", - "bucket": "test-bucket", - "delete-older-than-days": "9999999", - "endpoint": "https://s3.amazonaws.com", - "path": "/", - "region": None, - "s3-uri-style": "host", - "secret-key": "test-secret-key", - }, - [], - ), + assert harness.charm.backup._retrieve_s3_parameters() == ( + { + "access-key": "test-access-key", + "bucket": "test-bucket", + "delete-older-than-days": "9999999", + "endpoint": "https://s3.amazonaws.com", + "path": "/", + "region": None, + "s3-uri-style": "host", + "secret-key": "test-secret-key", + }, + [], ) # Test when all parameters are provided. 
@@ -1672,21 +1608,18 @@ def test_retrieve_s3_parameters( "s3-uri-style": " path ", "delete-older-than-days": "30", } - tc.assertEqual( - harness.charm.backup._retrieve_s3_parameters(), - ( - { - "access-key": "test-access-key", - "bucket": "test-bucket", - "endpoint": "https://storage.googleapis.com", - "path": "/test-path", - "region": "us-east-1", - "s3-uri-style": "path", - "secret-key": "test-secret-key", - "delete-older-than-days": "30", - }, - [], - ), + assert harness.charm.backup._retrieve_s3_parameters() == ( + { + "access-key": "test-access-key", + "bucket": "test-bucket", + "endpoint": "https://storage.googleapis.com", + "path": "/test-path", + "region": "us-east-1", + "s3-uri-style": "path", + "secret-key": "test-secret-key", + "delete-older-than-days": "30", + }, + [], ) @@ -1714,30 +1647,21 @@ def test_start_stop_pgbackrest_service(harness): ): # Test when S3 parameters are not ok (no operation, but returns success). _are_backup_settings_ok.return_value = (False, "fake error message") - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - True, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is True _stop.assert_not_called() _restart.assert_not_called() # Test when it was not possible to render the pgBackRest configuration file. _are_backup_settings_ok.return_value = (True, None) _render_pgbackrest_conf_file.return_value = False - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - False, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is False _stop.assert_not_called() _restart.assert_not_called() # Test when TLS is not enabled (should stop the service). _render_pgbackrest_conf_file.return_value = True _is_tls_enabled.return_value = False - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - True, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is True _stop.assert_called_once() _restart.assert_not_called() @@ -1745,10 +1669,7 @@ def test_start_stop_pgbackrest_service(harness): _stop.reset_mock() _is_tls_enabled.return_value = True _peer_members_endpoints.return_value = [] - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - True, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is True _stop.assert_called_once() _restart.assert_not_called() @@ -1757,19 +1678,13 @@ def test_start_stop_pgbackrest_service(harness): _peer_members_endpoints.return_value = ["fake-member-endpoint"] _is_primary.return_value = False _is_primary_pgbackrest_service_running.return_value = False - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - False, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is False _stop.assert_not_called() _restart.assert_not_called() # Test when the service has already started in the primary. 
_is_primary_pgbackrest_service_running.return_value = True - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - True, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is True _stop.assert_not_called() _restart.assert_called_once() @@ -1777,10 +1692,7 @@ def test_start_stop_pgbackrest_service(harness): _restart.reset_mock() _is_primary.return_value = True _is_primary_pgbackrest_service_running.return_value = False - tc.assertEqual( - harness.charm.backup.start_stop_pgbackrest_service(), - True, - ) + assert harness.charm.backup.start_stop_pgbackrest_service() is True _stop.assert_not_called() _restart.assert_called_once() @@ -1815,10 +1727,7 @@ def test_upload_content_to_s3(harness, tls_ca_chain_filename): _resource.side_effect = ValueError _construct_endpoint.return_value = "https://s3.us-east-1.amazonaws.com" _named_temporary_file.return_value.__enter__.return_value.name = "/tmp/test-file" - tc.assertEqual( - harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), - False, - ) + assert harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters) is False _resource.assert_called_once_with( "s3", endpoint_url="https://s3.us-east-1.amazonaws.com", @@ -1830,10 +1739,7 @@ def test_upload_content_to_s3(harness, tls_ca_chain_filename): _resource.reset_mock() _resource.side_effect = None upload_file.side_effect = S3UploadFailedError - tc.assertEqual( - harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), - False, - ) + assert harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters) is False _resource.assert_called_once_with( "s3", endpoint_url="https://s3.us-east-1.amazonaws.com", @@ -1847,10 +1753,7 @@ def test_upload_content_to_s3(harness, tls_ca_chain_filename): _named_temporary_file.reset_mock() upload_file.reset_mock() upload_file.side_effect = None - tc.assertEqual( - harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), - True, - ) + assert harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters) is True _resource.assert_called_once_with( "s3", endpoint_url="https://s3.us-east-1.amazonaws.com", diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 0b0e9d198c..ab98c18d39 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -5,10 +5,12 @@ import logging from datetime import datetime from unittest import TestCase -from unittest.mock import MagicMock, Mock, PropertyMock, patch +from unittest.mock import MagicMock, Mock, PropertyMock, patch, sentinel +import psycopg2 import pytest from charms.postgresql_k8s.v0.postgresql import PostgreSQLUpdateUserPasswordError +from lightkube import ApiError from lightkube.resources.core_v1 import Endpoints, Pod, Service from ops.model import ( ActiveStatus, @@ -24,6 +26,7 @@ from charm import PostgresqlOperatorCharm from constants import PEER, SECRET_INTERNAL_LABEL +from patroni import NotReadyError from tests.helpers import patch_network_get from tests.unit.helpers import _FakeApiError @@ -57,7 +60,7 @@ def test_on_leader_elected(harness): patch("charm.PostgresqlOperatorCharm.set_secret") as _set_secret, patch("charm.Patroni.reload_patroni_configuration"), patch("charm.PostgresqlOperatorCharm._patch_pod_labels"), - patch("charm.PostgresqlOperatorCharm._create_services"), + patch("charm.PostgresqlOperatorCharm._create_services") as _create_services, ): rel_id = harness.model.get_relation(PEER).id # Check that a new password was generated on leader election and 
nothing is done
@@ -104,19 +107,59 @@ def test_on_leader_elected(harness):
             namespace=harness.charm.model.name,
             obj={"metadata": {"annotations": {"leader": "postgresql-k8s-0"}}},
         )
-        tc.assertNotIn("cluster_initialised", harness.get_relation_data(rel_id, harness.charm.app))
+        assert "cluster_initialised" not in harness.get_relation_data(rel_id, harness.charm.app)

         # Test a failure in fixing the "leader" key in the endpoint annotations.
         _client.return_value.patch.side_effect = _FakeApiError
-        with tc.assertRaises(_FakeApiError):
+        with pytest.raises(_FakeApiError):
             harness.set_leader(False)
             harness.set_leader()

         # Test no failure if the resource doesn't exist.
         _client.return_value.patch.side_effect = _FakeApiError(404)
         harness.set_leader(False)
         harness.set_leader()

+        # K8s api error when creating services
+        response = Mock()
+        response.json.return_value = {"code": 403}
+        _create_services.side_effect = ApiError(response=response)
+        harness.set_leader(False)
+        harness.set_leader()
+
+        assert isinstance(harness.charm.unit.status, BlockedStatus)
+        assert harness.charm.unit.status.message == "failed to create k8s services"
+
+        # No trust when annotating
+        _client.return_value.get.side_effect = ApiError(response=response)
+        harness.set_leader(False)
+        harness.set_leader()
+
+        assert isinstance(harness.charm.unit.status, BlockedStatus)
+        assert (
+            harness.charm.unit.status.message
+            == "Insufficient permissions, try: `juju trust postgresql-k8s --scope=cluster`"
+        )
+
+
+@patch_network_get(private_address="1.1.1.1")
+def test_get_unit_ip(harness):
+    with (
+        patch("charm.PostgresqlOperatorCharm._peers", new_callable=PropertyMock) as _peers,
+    ):
+        _peers.return_value.data = {sentinel.unit: {"private-address": "2.2.2.2"}}
+
+        # Current host
+        assert harness.charm.get_unit_ip(harness.charm.unit) == "1.1.1.1"
+
+        # Not existing unit
+        assert harness.charm.get_unit_ip(None) is None
+
+        assert harness.charm.get_unit_ip(sentinel.unit) == "2.2.2.2"
+

 @patch_network_get(private_address="1.1.1.1")
 def test_on_postgresql_pebble_ready(harness):
@@ -205,6 +248,71 @@ def test_on_postgresql_pebble_ready_no_connection(harness):

     tc.assertIsInstance(harness.model.unit.status, MaintenanceStatus)


+def test_on_config_changed(harness):
+    with (
+        patch(
+            "charm.PostgresqlOperatorCharm.is_cluster_initialised",
+            new_callable=PropertyMock,
+            return_value=False,
+        ) as _is_cluster_initialised,
+        patch(
+            "charm.PostgresqlOperatorCharm._validate_config_options"
+        ) as _validate_config_options,
+        patch(
+            "charm.PostgreSQLUpgrade.idle", return_value=False, new_callable=PropertyMock
+        ) as _idle,
+        patch("charm.PostgresqlOperatorCharm.update_config") as _update_config,
+        patch("charm.Patroni.member_started", return_value=True, new_callable=PropertyMock),
+        patch("charm.Patroni.get_primary"),
+        patch(
+            "charm.PostgresqlOperatorCharm.is_standby_leader",
+            return_value=False,
+            new_callable=PropertyMock,
+        ),
+        patch(
+            "charm.PostgresqlOperatorCharm.enable_disable_extensions"
+        ) as _enable_disable_extensions,
+    ):
+        # Defers if cluster is not initialised
+        mock_event = Mock()
+        harness.charm._on_config_changed(mock_event)
+        mock_event.defer.assert_called_once_with()
+        mock_event.defer.reset_mock()
+
+        # Defers if upgrade is not idle
+        _is_cluster_initialised.return_value = True
+        mock_event = Mock()
+        harness.charm._on_config_changed(mock_event)
+        mock_event.defer.assert_called_once_with()
+        mock_event.defer.reset_mock()
+
+        # Defers on db connection error
+        _idle.return_value = True
+        _validate_config_options.side_effect = psycopg2.OperationalError
+        harness.charm._on_config_changed(mock_event)
+        mock_event.defer.assert_called_once_with()
+        mock_event.defer.reset_mock()
+
+        # Blocks if validation fails
+        _validate_config_options.side_effect = ValueError
+        harness.charm._on_config_changed(mock_event)
+        assert isinstance(harness.charm.unit.status, BlockedStatus)
+        assert harness.charm.unit.status.message == "Configuration Error. Please check the logs"
+
+        # Clears status if validation passes
+        _validate_config_options.side_effect = None
+        harness.charm._on_config_changed(mock_event)
+        assert isinstance(harness.charm.unit.status, ActiveStatus)
+        assert not _enable_disable_extensions.called
+
+        # Leader enables extensions
+        with harness.hooks_disabled():
+            harness.set_leader()
+        harness.charm._on_config_changed(mock_event)
+        assert isinstance(harness.charm.unit.status, ActiveStatus)
+        _enable_disable_extensions.assert_called_once_with()
+
+
 def test_on_get_password(harness):
     # Create a mock event and set passwords in peer relation data.
     mock_event = MagicMock(params={})
@@ -402,6 +510,141 @@ def test_on_update_status_with_error_on_get_primary(harness):
     )


+def test_add_cluster_member(harness):
+    with (
+        patch("charm.PostgresqlOperatorCharm._get_hostname_from_unit", return_value="hostname"),
+        patch("charm.PostgresqlOperatorCharm._patch_pod_labels") as _patch_pod_labels,
+        patch("charm.PostgresqlOperatorCharm._add_to_endpoints") as _add_to_endpoints,
+        patch("charm.Patroni.are_all_members_ready") as _are_all_members_ready,
+    ):
+        harness.charm.add_cluster_member(sentinel.member)
+
+        _add_to_endpoints.assert_called_once_with("hostname")
+        _patch_pod_labels.assert_called_once_with(sentinel.member)
+
+        # Block on k8s error
+        response = Mock()
+        response.json.return_value = {}
+        _patch_pod_labels.side_effect = ApiError(response=response)
+        harness.charm.add_cluster_member(sentinel.member)
+        assert isinstance(harness.charm.unit.status, BlockedStatus)
+        assert harness.charm.unit.status.message == "failed to patch pod with error None"
+
+        # Not ready error if not all members are ready
+        _are_all_members_ready.return_value = False
+        with pytest.raises(NotReadyError):
+            harness.charm.add_cluster_member(sentinel.member)
+
+
+def test_enable_disable_extensions(harness):
+    with (
+        patch("charm.CharmConfig.plugin_keys") as _plugin_keys,
+        patch("charm.PostgreSQL.enable_disable_extensions") as _enable_disable_extensions,
+        patch("charm.Patroni.get_primary", return_value=None) as _get_primary,
+        patch("charm.Patroni.member_started", return_value=True, new_callable=PropertyMock),
+        patch(
+            "charm.PostgresqlOperatorCharm.is_standby_leader",
+            return_value=False,
+            new_callable=PropertyMock,
+        ),
+    ):
+        # Early exit if no primary
+        harness.charm.enable_disable_extensions()
+        assert not _plugin_keys.called
+
+        # Blocks on missing extension dependencies
+        with harness.hooks_disabled():
+            harness.update_config({
+                "plugin_cube_enable": False,
+                "plugin_spi_enable": False,
+                "plugin_jsonb_plperl_enable": True,
+                "plugin_plperl_enable": False,
+            })
+        _get_primary.return_value = "primary"
+        _plugin_keys.return_value = [
+            "plugin_cube_enable",
+            "plugin_spi_enable",
+            "plugin_jsonb_plperl_enable",
+            "plugin_plperl_enable",
+        ]
+        harness.charm.enable_disable_extensions()
+        assert isinstance(harness.charm.unit.status, BlockedStatus)
+        assert (
+            harness.charm.unit.status.message
+            == "Unsatisfied plugin dependencies.
Please check the logs" + ) + + # Unblock if dependencies are met + with harness.hooks_disabled(): + harness.update_config({ + "plugin_plperl_enable": True, + }) + harness.charm.enable_disable_extensions() + assert isinstance(harness.charm.unit.status, ActiveStatus) + _enable_disable_extensions.assert_called_once_with( + { + "cube": False, + "refint": False, + "autoinc": False, + "insert_username": False, + "moddatetime": False, + "jsonb_plperl": True, + "plperl": True, + }, + None, + ) + + +def test_on_peer_relation_departed(harness): + with ( + patch( + "charm.PostgresqlOperatorCharm._get_endpoints_to_remove" + ) as _get_endpoints_to_remove, + patch("charm.PostgresqlOperatorCharm._peers", new_callable=PropertyMock) as _peers, + patch( + "charm.PostgresqlOperatorCharm._get_endpoints_to_remove", return_value=sentinel.units + ) as _get_endpoints_to_remove, + patch("charm.PostgresqlOperatorCharm._remove_from_endpoints") as _remove_from_endpoints, + ): + # Early exit if not leader + event = Mock() + harness.charm._on_peer_relation_departed(event) + assert not _get_endpoints_to_remove.called + + # Defer if cluster is not initialised + with harness.hooks_disabled(): + harness.set_leader() + harness.charm._on_peer_relation_departed(event) + event.defer.assert_called_once_with() + + _peers.return_value.data = {harness.charm.app: {"cluster_initialised": True}} + harness.charm._on_peer_relation_departed(event) + _get_endpoints_to_remove.assert_called_once_with() + _remove_from_endpoints.assert_called_once_with(sentinel.units) + + +def test_on_pgdata_storage_detaching(harness): + with ( + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.Patroni.are_all_members_ready", new_callable=PropertyMock), + patch("charm.Patroni.get_primary", return_value="primary") as _get_primary, + patch("charm.Patroni.switchover") as _switchover, + patch("charm.Patroni.primary_changed") as _primary_changed, + ): + # Early exit if not primary + event = Mock() + harness.charm._on_pgdata_storage_detaching(event) + assert not _member_started.called + + _get_primary.side_effect = [harness.charm.unit.name, "primary"] + harness.charm._on_pgdata_storage_detaching(event) + _switchover.assert_called_once_with() + _primary_changed.assert_called_once_with("primary") + + def test_on_update_status_after_restore_operation(harness): with ( patch("charm.PostgresqlOperatorCharm._set_active_status") as _set_active_status, @@ -1495,6 +1738,4 @@ def test_set_active_status(harness): ) else: _get_primary.side_effect = values[0] - _get_primary.return_value = None - harness.charm._set_active_status() - assert isinstance(harness.charm.unit.status, MaintenanceStatus) + _get_primary.return_value diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py index bf2c894c2d..2848f8f013 100644 --- a/tests/unit/test_db.py +++ b/tests/unit/test_db.py @@ -1,7 +1,6 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -from unittest import TestCase from unittest.mock import Mock, PropertyMock, patch import pytest @@ -22,9 +21,6 @@ RELATION_NAME = "db" POSTGRESQL_VERSION = "14" -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -116,7 +112,7 @@ def test_on_relation_changed(harness): {"cluster_initialised": "True"}, ) request_database(harness) - tc.assertEqual(_defer.call_count, 2) + assert _defer.call_count == 2 _set_up_relation.assert_not_called() # Request a database to a non leader unit. 
@@ -140,7 +136,7 @@ def test_get_extensions(harness): # Test when there are no extensions in the relation databags. rel_id = harness.model.get_relation(RELATION_NAME).id relation = harness.model.get_relation(RELATION_NAME, rel_id) - tc.assertEqual(harness.charm.legacy_db_relation._get_extensions(relation), ([], set())) + assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set()) # Test when there are extensions in the application relation databag. extensions = ["", "citext:public", "debversion"] @@ -150,9 +146,9 @@ def test_get_extensions(harness): "application", {"extensions": ",".join(extensions)}, ) - tc.assertEqual( - harness.charm.legacy_db_relation._get_extensions(relation), - ([extensions[1], extensions[2]], {extensions[1].split(":")[0], extensions[2]}), + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[1].split(":")[0], extensions[2]}, ) # Test when there are extensions in the unit relation databag. @@ -167,9 +163,9 @@ def test_get_extensions(harness): "application/0", {"extensions": ",".join(extensions)}, ) - tc.assertEqual( - harness.charm.legacy_db_relation._get_extensions(relation), - ([extensions[1], extensions[2]], {extensions[1].split(":")[0], extensions[2]}), + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[1].split(":")[0], extensions[2]}, ) # Test when one of the plugins/extensions is enabled. @@ -183,9 +179,9 @@ def test_get_extensions(harness): harness = Harness(PostgresqlOperatorCharm, config=config) harness.cleanup() harness.begin() - tc.assertEqual( - harness.charm.legacy_db_relation._get_extensions(relation), - ([extensions[1], extensions[2]], {extensions[2]}), + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[2]}, ) @@ -228,7 +224,7 @@ def test_set_up_relation(harness): # Assert no operation is done when at least one of the requested extensions # is disabled. 
relation = harness.model.get_relation(RELATION_NAME, rel_id) - tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert not harness.charm.legacy_db_relation.set_up_relation(relation) postgresql_mock.create_user.assert_not_called() postgresql_mock.create_database.assert_not_called() postgresql_mock.get_postgresql_version.assert_not_called() @@ -242,13 +238,13 @@ def test_set_up_relation(harness): "application", {"database": DATABASE}, ) - tc.assertTrue(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert harness.charm.legacy_db_relation.set_up_relation(relation) user = f"relation_id_{rel_id}" postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) postgresql_mock.create_database.assert_called_once_with( DATABASE, user, plugins=[], client_relations=[relation] ) - tc.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) + assert postgresql_mock.get_postgresql_version.call_count == 2 _update_unit_status.assert_called_once() expected_data = { "allowed-units": "application/0", @@ -266,9 +262,9 @@ def test_set_up_relation(harness): "user": f"relation_id_{rel_id}", "version": POSTGRESQL_VERSION, } - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), expected_data) - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), expected_data) - tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == expected_data + assert harness.get_relation_data(rel_id, harness.charm.unit.name) == expected_data + assert not isinstance(harness.model.unit.status, BlockedStatus) # Assert that the correct calls were made when the database name is # provided only in the unit databag. @@ -288,16 +284,16 @@ def test_set_up_relation(harness): {"database": DATABASE}, ) clear_relation_data(harness) - tc.assertTrue(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert harness.charm.legacy_db_relation.set_up_relation(relation) postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) postgresql_mock.create_database.assert_called_once_with( DATABASE, user, plugins=[], client_relations=[relation] ) - tc.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) + assert postgresql_mock.get_postgresql_version.call_count == 2 _update_unit_status.assert_called_once() - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), expected_data) - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), expected_data) - tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == expected_data + assert harness.get_relation_data(rel_id, harness.charm.unit.name) == expected_data + assert not isinstance(harness.model.unit.status, BlockedStatus) # Assert that the correct calls were made when the database name is not provided. postgresql_mock.create_user.reset_mock() @@ -311,15 +307,15 @@ def test_set_up_relation(harness): {"database": ""}, ) clear_relation_data(harness) - tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert not harness.charm.legacy_db_relation.set_up_relation(relation) postgresql_mock.create_user.assert_not_called() postgresql_mock.create_database.assert_not_called() postgresql_mock.get_postgresql_version.assert_not_called() _update_unit_status.assert_not_called() # No data is set in the databags by the database. 
- tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) - tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} + assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} + assert not isinstance(harness.model.unit.status, BlockedStatus) # BlockedStatus due to a PostgreSQLCreateUserError. with harness.hooks_disabled(): @@ -328,30 +324,30 @@ def test_set_up_relation(harness): "application", {"database": DATABASE}, ) - tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert not harness.charm.legacy_db_relation.set_up_relation(relation) postgresql_mock.create_database.assert_not_called() postgresql_mock.get_postgresql_version.assert_not_called() _update_unit_status.assert_not_called() - tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + assert isinstance(harness.model.unit.status, BlockedStatus) # No data is set in the databags by the database. - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} + assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} # BlockedStatus due to a PostgreSQLCreateDatabaseError. harness.charm.unit.status = ActiveStatus() - tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert not harness.charm.legacy_db_relation.set_up_relation(relation) postgresql_mock.get_postgresql_version.assert_not_called() _update_unit_status.assert_not_called() - tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + assert isinstance(harness.model.unit.status, BlockedStatus) # No data is set in the databags by the database. - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) - tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} + assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. harness.charm.unit.status = ActiveStatus() - tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + assert not harness.charm.legacy_db_relation.set_up_relation(relation) _update_unit_status.assert_not_called() - tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + assert isinstance(harness.model.unit.status, BlockedStatus) def test_update_unit_status(harness): @@ -369,35 +365,35 @@ def test_update_unit_status(harness): _has_blocked_status.return_value = False harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_not_called() - tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) + assert not isinstance(harness.charm.unit.status, ActiveStatus) # Test when the charm is blocked but not due to extensions request. _has_blocked_status.return_value = True harness.charm.unit.status = BlockedStatus("fake message") harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_not_called() - tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) + assert not isinstance(harness.charm.unit.status, ActiveStatus) # Test when there are relations causing the blocked status. 
harness.charm.unit.status = BlockedStatus("extensions requested through relation") _check_for_blocking_relations.return_value = True harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_called_once_with(relation.id) - tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) + assert not isinstance(harness.charm.unit.status, ActiveStatus) # Test when there are no relations causing the blocked status anymore. _check_for_blocking_relations.reset_mock() _check_for_blocking_relations.return_value = False harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_called_once_with(relation.id) - tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + assert isinstance(harness.charm.unit.status, ActiveStatus) def test_on_relation_departed(harness): with patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)): # Test when this unit is departing the relation (due to a scale down event). peer_rel_id = harness.model.get_relation(PEER).id - tc.assertNotIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) + assert "departing" not in harness.get_relation_data(peer_rel_id, harness.charm.unit) event = Mock() event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} event.departing_unit = harness.charm.unit @@ -411,7 +407,7 @@ def test_on_relation_departed(harness): event.departing_unit = Unit(f"{harness.charm.app}/1", None, harness.charm.app._backend, {}) harness.charm.legacy_db_relation._on_relation_departed(event) relation_data = harness.get_relation_data(peer_rel_id, harness.charm.unit) - tc.assertNotIn("departing", relation_data) + assert "departing" not in relation_data def test_on_relation_broken(harness): @@ -469,7 +465,7 @@ def test_on_relation_broken_extensions_unblock(harness): # Break the relation that blocked the charm. harness.remove_relation(rel_id) - tc.assertTrue(isinstance(harness.model.unit.status, ActiveStatus)) + assert isinstance(harness.model.unit.status, ActiveStatus) def test_on_relation_broken_extensions_keep_block(harness): @@ -509,4 +505,4 @@ def test_on_relation_broken_extensions_keep_block(harness): event.relation.id = first_rel_id # Break one of the relations that block the charm. harness.charm.legacy_db_relation._on_relation_broken(event) - tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + assert isinstance(harness.model.unit.status, BlockedStatus) diff --git a/tests/unit/test_patroni.py b/tests/unit/test_patroni.py index b013c0a9ff..d93060e3bd 100644 --- a/tests/unit/test_patroni.py +++ b/tests/unit/test_patroni.py @@ -2,7 +2,6 @@ # Copyright 2021 Canonical Ltd. # See LICENSE file for licensing details. -from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, mock_open, patch import pytest @@ -16,9 +15,6 @@ from patroni import Patroni, SwitchoverFailedError from tests.helpers import STORAGE_PATH -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -61,7 +57,7 @@ def test_get_primary(harness, patroni): # Test returning pod name. primary = patroni.get_primary() - tc.assertEqual(primary, "postgresql-k8s-1") + assert primary == "postgresql-k8s-1" _get.assert_called_once_with( "http://postgresql-k8s-0:8008/cluster", verify=True, timeout=5 ) @@ -69,7 +65,7 @@ def test_get_primary(harness, patroni): # Test returning unit name. 
_get.reset_mock() primary = patroni.get_primary(unit_name_pattern=True) - tc.assertEqual(primary, "postgresql-k8s/1") + assert primary == "postgresql-k8s/1" _get.assert_called_once_with( "http://postgresql-k8s-0:8008/cluster", verify=True, timeout=5 ) @@ -85,13 +81,13 @@ def test_is_creating_backup(harness, patroni): {"name": "postgresql-k8s-1", "tags": {"is_creating_backup": True}}, ] } - tc.assertTrue(patroni.is_creating_backup) + assert patroni.is_creating_backup # Test when no member is creating a backup. response.json.return_value = { "members": [{"name": "postgresql-k8s-0"}, {"name": "postgresql-k8s-1"}] } - tc.assertFalse(patroni.is_creating_backup) + assert not patroni.is_creating_backup def test_is_replication_healthy(harness, patroni): @@ -102,7 +98,7 @@ def test_is_replication_healthy(harness, patroni): ): # Test when replication is healthy. _get.return_value.status_code = 200 - tc.assertTrue(patroni.is_replication_healthy) + assert patroni.is_replication_healthy # Test when replication is not healthy. _get.side_effect = [ @@ -110,7 +106,7 @@ def test_is_replication_healthy(harness, patroni): MagicMock(status_code=200), MagicMock(status_code=503), ] - tc.assertFalse(patroni.is_replication_healthy) + assert not patroni.is_replication_healthy def test_member_streaming(harness, patroni): @@ -120,18 +116,18 @@ def test_member_streaming(harness, patroni): ): # Test when the member is streaming from primary. _get.return_value.json.return_value = {"replication_state": "streaming"} - tc.assertTrue(patroni.member_streaming) + assert patroni.member_streaming # Test when the member is not streaming from primary. _get.return_value.json.return_value = {"replication_state": "running"} - tc.assertFalse(patroni.member_streaming) + assert not patroni.member_streaming _get.return_value.json.return_value = {} - tc.assertFalse(patroni.member_streaming) + assert not patroni.member_streaming # Test when an error happens. _get.side_effect = RetryError - tc.assertFalse(patroni.member_streaming) + assert not patroni.member_streaming def test_render_file(harness, patroni): @@ -155,7 +151,7 @@ def test_render_file(harness, patroni): patroni._render_file(filename, "rendered-content", 0o640) # Check the rendered file is opened with "w+" mode. - tc.assertEqual(mock.call_args_list[0][0], (filename, "w+")) + assert mock.call_args_list[0][0] == (filename, "w+") # Ensure that the correct user is lookup up. _pwnam.assert_called_with("postgres") # Ensure the file is chmod'd correctly. @@ -199,7 +195,7 @@ def test_render_patroni_yml_file(harness, patroni): patroni.render_patroni_yml_file(enable_tls=False) # Check the template is opened read-only in the call to open. - tc.assertEqual(mock.call_args_list[0][0], ("templates/patroni.yml.j2", "r")) + assert mock.call_args_list[0][0] == ("templates/patroni.yml.j2", "r") # Ensure the correct rendered template is sent to _render_file method. _render_file.assert_called_once_with( f"{STORAGE_PATH}/patroni.yml", @@ -222,7 +218,7 @@ def test_render_patroni_yml_file(harness, patroni): minority_count=patroni._members_count // 2, version="14", ) - tc.assertNotEqual(expected_content_with_tls, expected_content) + assert expected_content_with_tls != expected_content # Patch the `open` method with our mock. with patch("builtins.open", mock, create=True): @@ -252,18 +248,18 @@ def test_primary_endpoint_ready(harness, patroni): ): # Test with an issue when trying to connect to the Patroni API. 
_get.side_effect = RetryError - tc.assertFalse(patroni.primary_endpoint_ready) + assert not patroni.primary_endpoint_ready # Mock the request return values. _get.side_effect = None _get.return_value.json.return_value = {"state": "stopped"} # Test with the primary endpoint not ready yet. - tc.assertFalse(patroni.primary_endpoint_ready) + assert not patroni.primary_endpoint_ready # Test with the primary endpoint ready. _get.return_value.json.return_value = {"state": "running"} - tc.assertTrue(patroni.primary_endpoint_ready) + assert patroni.primary_endpoint_ready def test_switchover(harness, patroni): @@ -296,8 +292,11 @@ def test_switchover(harness, patroni): # Test failed switchovers. _post.reset_mock() _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-1"] - with tc.assertRaises(SwitchoverFailedError): + try: patroni.switchover("postgresql-k8s/2") + assert False + except SwitchoverFailedError: + pass _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"}, @@ -307,8 +306,11 @@ def test_switchover(harness, patroni): _post.reset_mock() _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-2"] response.status_code = 400 - with tc.assertRaises(SwitchoverFailedError): + try: patroni.switchover("postgresql-k8s/2") + assert False + except SwitchoverFailedError: + pass _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"}, diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py index d1adae5015..d969ec7c91 100644 --- a/tests/unit/test_postgresql.py +++ b/tests/unit/test_postgresql.py @@ -1,6 +1,5 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -from unittest import TestCase from unittest.mock import call, patch import psycopg2 @@ -12,9 +11,6 @@ from charm import PostgresqlOperatorCharm from constants import PEER -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -151,92 +147,89 @@ def test_create_database(harness): # Test a failed database creation. _enable_disable_extensions.reset_mock() execute.side_effect = psycopg2.Error - with tc.assertRaises(PostgreSQLCreateDatabaseError): + try: harness.charm.postgresql.create_database(database, user, plugins, client_relations) + assert False + except PostgreSQLCreateDatabaseError: + pass _enable_disable_extensions.assert_not_called() def test_generate_database_privileges_statements(harness): # Test with only one established relation. 
- tc.assertEqual( - harness.charm.postgresql._generate_database_privileges_statements( - 1, ["test_schema_1", "test_schema_2"], "test_user" - ), - [ - Composed([ - SQL( - "DO $$\nDECLARE r RECORD;\nBEGIN\n FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '.\"' || tablename ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema')\nUNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '.\"' || sequence_name ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema')\nUNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema')\nUNION SELECT 4 AS index,'ALTER VIEW '|| schemaname || '.\"' || viewname ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP\n EXECUTE format(r.statement);\n END LOOP;\nEND; $$;" - ), - ]), - "UPDATE pg_catalog.pg_largeobject_metadata\nSET lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'test_user')\nWHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'operator');", - ], - ) + assert harness.charm.postgresql._generate_database_privileges_statements( + 1, ["test_schema_1", "test_schema_2"], "test_user" + ) == [ + Composed([ + SQL( + "DO $$\nDECLARE r RECORD;\nBEGIN\n FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '.\"' || tablename ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema')\nUNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '.\"' || sequence_name ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema')\nUNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema')\nUNION SELECT 4 AS index,'ALTER VIEW '|| schemaname || '.\"' || viewname ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP\n EXECUTE format(r.statement);\n END LOOP;\nEND; $$;" + ), + ]), + "UPDATE pg_catalog.pg_largeobject_metadata\nSET lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'test_user')\nWHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'operator');", + ] # Test with multiple established relations. 
- tc.assertEqual( - harness.charm.postgresql._generate_database_privileges_statements( - 2, ["test_schema_1", "test_schema_2"], "test_user" - ), - [ - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - ], - ) + assert harness.charm.postgresql._generate_database_privileges_statements( + 2, ["test_schema_1", "test_schema_2"], "test_user" + ) == [ + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + ] def test_build_postgresql_parameters(harness): @@ -255,51 +248,53 @@ def test_build_postgresql_parameters(harness): "response_test_config_option_8": "partial", "vacuum_test_config_option_9": 10.5, } - tc.assertEqual( - harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000), - { - "test_config_option_1": True, - "test_config_option_2": False, - "test_config_option_3": "on", - "test_config_option_4": 1024, - "test_config_option_5": "scheduled", - "test_config_option_7": "off", - "DateStyle": "ISO, DMY", - "TimeZone": "UTC", - "test_config_option_8": "partial", - "test_config_option_9": 10.5, - "shared_buffers": "250MB", - "effective_cache_size": "750MB", - }, - ) + assert harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) == { + "test_config_option_1": True, + "test_config_option_2": False, + "test_config_option_3": "on", + "test_config_option_4": 1024, + "test_config_option_5": "scheduled", + "test_config_option_7": "off", + "DateStyle": "ISO, DMY", + "TimeZone": "UTC", + "test_config_option_8": "partial", + "test_config_option_9": 10.5, + "shared_buffers": "250MB", + "effective_cache_size": "750MB", + } # Test with a limited imposed to the available memory. 
parameters = harness.charm.postgresql.build_postgresql_parameters( config_options, 1000000000, 600000000 ) - tc.assertEqual(parameters["shared_buffers"], "150MB") - tc.assertEqual(parameters["effective_cache_size"], "450MB") + assert parameters["shared_buffers"] == "150MB" + assert parameters["effective_cache_size"] == "450MB" # Test when the requested shared buffers are greater than 40% of the available memory. config_options["memory_shared_buffers"] = 50001 - with tc.assertRaises(Exception): + try: harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) + assert False + except AssertionError as e: + raise e + except Exception: + pass # Test when the requested shared buffers are lower than 40% of the available memory # (also check that it's used when calculating the effective cache size value). config_options["memory_shared_buffers"] = 50000 parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - tc.assertEqual(parameters["shared_buffers"], 50000) - tc.assertEqual(parameters["effective_cache_size"], "600MB") + assert parameters["shared_buffers"] == 50000 + assert parameters["effective_cache_size"] == "600MB" # Test when the profile is set to "testing". config_options["profile"] = "testing" parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - tc.assertEqual(parameters["shared_buffers"], 50000) - tc.assertNotIn("effective_cache_size", parameters) + assert parameters["shared_buffers"] == 50000 + assert "effective_cache_size" not in parameters # Test when there is no shared_buffers value set in the config option. del config_options["memory_shared_buffers"] parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - tc.assertEqual(parameters["shared_buffers"], "128MB") - tc.assertNotIn("effective_cache_size", parameters) + assert parameters["shared_buffers"] == "128MB" + assert "effective_cache_size" not in parameters diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index 773386cfc5..bb94b4bf60 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -1,7 +1,6 @@ # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. -from unittest import TestCase from unittest.mock import Mock, PropertyMock, patch import pytest @@ -24,9 +23,6 @@ RELATION_NAME = "database" POSTGRESQL_VERSION = "14" -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -124,51 +120,42 @@ def test_on_database_requested(harness): postgresql_mock.get_postgresql_version.assert_called_once() # Assert that the relation data was updated correctly. 
- tc.assertEqual( - harness.get_relation_data(rel_id, harness.charm.app.name), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "username": user, - "password": "test-password", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - "version": POSTGRESQL_VERSION, - "database": f"{DATABASE}", - }, - ) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "username": user, + "password": "test-password", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + "version": POSTGRESQL_VERSION, + "database": f"{DATABASE}", + } # Assert no BlockedStatus was set. - tc.assertFalse(isinstance(harness.model.unit.status, BlockedStatus)) + assert not isinstance(harness.model.unit.status, BlockedStatus) # BlockedStatus due to a PostgreSQLCreateUserError. request_database(harness) - tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + assert isinstance(harness.model.unit.status, BlockedStatus) # No data is set in the databag by the database. - tc.assertEqual( - harness.get_relation_data(rel_id, harness.charm.app.name), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - }, - ) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + } # BlockedStatus due to a PostgreSQLCreateDatabaseError. request_database(harness) - tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + assert isinstance(harness.model.unit.status, BlockedStatus) # No data is set in the databag by the database. - tc.assertEqual( - harness.get_relation_data(rel_id, harness.charm.app.name), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - }, - ) + assert harness.get_relation_data(rel_id, harness.charm.app.name) == { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + } # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. request_database(harness) - tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + assert isinstance(harness.model.unit.status, BlockedStatus) @patch_network_get(private_address="1.1.1.1") @@ -176,7 +163,7 @@ def test_on_relation_departed(harness): with patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)): peer_rel_id = harness.model.get_relation(PEER).id # Test when this unit is departing the relation (due to a scale down event). 
- tc.assertNotIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) + assert "departing" not in harness.get_relation_data(peer_rel_id, harness.charm.unit) event = Mock() event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} event.departing_unit = harness.charm.unit @@ -190,7 +177,7 @@ def test_on_relation_departed(harness): event.departing_unit = Unit(f"{harness.charm.app}/1", None, harness.charm.app._backend, {}) harness.charm.postgresql_client_relation._on_relation_departed(event) relation_data = harness.get_relation_data(peer_rel_id, harness.charm.unit) - tc.assertNotIn("departing", relation_data) + assert "departing" not in relation_data @patch_network_get(private_address="1.1.1.1") diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py index 3fe00b324d..91472149bf 100644 --- a/tests/unit/test_postgresql_tls.py +++ b/tests/unit/test_postgresql_tls.py @@ -2,7 +2,6 @@ # See LICENSE file for licensing details. import base64 import socket -from unittest import TestCase from unittest.mock import MagicMock, call, patch import pytest @@ -16,9 +15,6 @@ RELATION_NAME = "certificates" SCOPE = "unit" -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -124,8 +120,8 @@ def test_request_certificate(harness): ], ) _generate_csr.assert_has_calls([generate_csr_call]) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) + assert harness.charm.get_secret(SCOPE, "key") is not None + assert harness.charm.get_secret(SCOPE, "csr") is not None _request_certificate_creation.assert_not_called() # Test without providing a private key. @@ -134,8 +130,8 @@ def test_request_certificate(harness): relate_to_tls_certificates_operator(harness) harness.charm.tls._request_certificate(None) _generate_csr.assert_has_calls([generate_csr_call]) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) + assert harness.charm.get_secret(SCOPE, "key") is not None + assert harness.charm.get_secret(SCOPE, "csr") is not None _request_certificate_creation.assert_called_once() # Test providing a private key. @@ -157,8 +153,8 @@ def test_request_certificate(harness): ], ) _generate_csr.assert_has_calls([custom_key_generate_csr_call]) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) - tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) + assert harness.charm.get_secret(SCOPE, "key") is not None + assert harness.charm.get_secret(SCOPE, "csr") is not None _request_certificate_creation.assert_called_once() @@ -166,14 +162,14 @@ def test_parse_tls_file(harness): # Test with a plain text key. key = get_content_from_file(filename="tests/unit/key.pem") parsed_key = harness.charm.tls._parse_tls_file(key) - tc.assertEqual(parsed_key, key.encode("utf-8")) + assert parsed_key == key.encode("utf-8") # Test with a base64 encoded key. 
key = get_content_from_file(filename="tests/unit/key.pem") parsed_key = harness.charm.tls._parse_tls_file( base64.b64encode(key.encode("utf-8")).decode("utf-8") ) - tc.assertEqual(parsed_key, key.encode("utf-8")) + assert parsed_key == key.encode("utf-8") def test_on_tls_relation_joined(harness): @@ -191,7 +187,7 @@ def test_on_tls_relation_broken(harness): rel_id = relate_to_tls_certificates_operator(harness) harness.remove_relation(rel_id) _update_config.assert_called_once() - tc.assertTrue(no_secrets(harness)) + assert no_secrets(harness) def test_on_certificate_available(harness): @@ -203,17 +199,17 @@ def test_on_certificate_available(harness): ): # Test with no provided or invalid CSR. emit_certificate_available_event(harness) - tc.assertTrue(no_secrets(harness)) + assert no_secrets(harness) _push_tls_files_to_workload.assert_not_called() # Test providing CSR. harness.charm.set_secret(SCOPE, "csr", "test-csr\n") emit_certificate_available_event(harness) - tc.assertEqual(harness.charm.get_secret(SCOPE, "ca"), "test-ca") - tc.assertEqual(harness.charm.get_secret(SCOPE, "cert"), "test-cert") - tc.assertEqual( - harness.charm.get_secret(SCOPE, "chain"), - "test-chain-ca-certificate\ntest-chain-certificate", + assert harness.charm.get_secret(SCOPE, "ca") == "test-ca" + assert harness.charm.get_secret(SCOPE, "cert") == "test-cert" + assert ( + harness.charm.get_secret(SCOPE, "chain") + == "test-chain-ca-certificate\ntest-chain-certificate" ) _push_tls_files_to_workload.assert_called_once() _defer.assert_not_called() @@ -232,7 +228,7 @@ def test_on_certificate_expiring(harness): ): # Test with no provided or invalid certificate. emit_certificate_expiring_event(harness) - tc.assertTrue(no_secrets(harness)) + assert no_secrets(harness) # Test providing a certificate. harness.charm.set_secret( @@ -241,41 +237,38 @@ def test_on_certificate_expiring(harness): harness.charm.set_secret(SCOPE, "cert", "test-cert\n") harness.charm.set_secret(SCOPE, "csr", "test-csr") emit_certificate_expiring_event(harness) - tc.assertTrue(no_secrets(harness, include_certificate=False)) + assert no_secrets(harness, include_certificate=False) _request_certificate_renewal.assert_called_once() @patch_network_get(private_address="1.1.1.1") def test_get_sans(harness): sans = harness.charm.tls._get_sans() - tc.assertEqual( - sans, - { - "sans_ip": ["1.1.1.1"], - "sans_dns": [ - "postgresql-k8s-0", - "postgresql-k8s-0.postgresql-k8s-endpoints", - socket.getfqdn(), - "1.1.1.1", - "postgresql-k8s-primary.None.svc.cluster.local", - "postgresql-k8s-replicas.None.svc.cluster.local", - ], - }, - ) + assert sans == { + "sans_ip": ["1.1.1.1"], + "sans_dns": [ + "postgresql-k8s-0", + "postgresql-k8s-0.postgresql-k8s-endpoints", + socket.getfqdn(), + "1.1.1.1", + "postgresql-k8s-primary.None.svc.cluster.local", + "postgresql-k8s-replicas.None.svc.cluster.local", + ], + } def test_get_tls_files(harness): # Test with no TLS files available. key, ca, certificate = harness.charm.tls.get_tls_files() - tc.assertIsNone(key) - tc.assertIsNone(ca) - tc.assertIsNone(certificate) + assert key is None + assert ca is None + assert certificate is None # Test with TLS files available. 
harness.charm.set_secret(SCOPE, "key", "test-key") harness.charm.set_secret(SCOPE, "ca", "test-ca") harness.charm.set_secret(SCOPE, "cert", "test-cert") key, ca, certificate = harness.charm.tls.get_tls_files() - tc.assertEqual(key, "test-key") - tc.assertEqual(ca, "test-ca") - tc.assertEqual(certificate, "test-cert") + assert key == "test-key" + assert ca == "test-ca" + assert certificate == "test-cert" diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 9d90af81fc..6b74843bd4 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -1,6 +1,5 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, call, patch import pytest @@ -16,9 +15,6 @@ from patroni import SwitchoverFailedError from tests.unit.helpers import _FakeApiError -# used for assert functions -tc = TestCase() - @pytest.fixture(autouse=True) def harness(): @@ -42,7 +38,7 @@ def harness(): def test_is_no_sync_member(harness): # Test when there is no list of sync-standbys in the relation data. - tc.assertFalse(harness.charm.upgrade.is_no_sync_member) + assert not harness.charm.upgrade.is_no_sync_member upgrade_relation_id = harness.model.get_relation("upgrade").id # Test when the current unit is not part of the list of sync-standbys @@ -53,7 +49,7 @@ def test_is_no_sync_member(harness): harness.charm.app.name, {"sync-standbys": '["postgresql-k8s/1", "postgresql-k8s/2"]'}, ) - tc.assertTrue(harness.charm.upgrade.is_no_sync_member) + assert harness.charm.upgrade.is_no_sync_member # Test when the current unit is part of the list of sync-standbys from the relation data. with harness.hooks_disabled(): @@ -64,7 +60,7 @@ def test_is_no_sync_member(harness): "sync-standbys": f'["{harness.charm.unit.name}", "postgresql-k8s/1", "postgresql-k8s/2"]' }, ) - tc.assertFalse(harness.charm.upgrade.is_no_sync_member) + assert not harness.charm.upgrade.is_no_sync_member def test_log_rollback(harness): @@ -197,15 +193,21 @@ def test_pre_upgrade_check(harness): _switchover.side_effect = [None, SwitchoverFailedError] # Test when not all members are ready. - with tc.assertRaises(ClusterNotReadyError): + try: harness.charm.upgrade.pre_upgrade_check() + assert False + except ClusterNotReadyError: + pass _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() # Test when a backup is being created. - with tc.assertRaises(ClusterNotReadyError): + try: harness.charm.upgrade.pre_upgrade_check() + assert False + except ClusterNotReadyError: + pass _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() @@ -224,8 +226,11 @@ def test_pre_upgrade_check(harness): _set_rolling_update_partition.reset_mock() _get_primary.return_value = f"{harness.charm.app.name}/1" _get_sync_standby_names.return_value = [] - with tc.assertRaises(ClusterNotReadyError): + try: harness.charm.upgrade.pre_upgrade_check() + assert False + except ClusterNotReadyError: + pass _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() @@ -243,8 +248,11 @@ def test_pre_upgrade_check(harness): # Test when the switchover fails. 
_switchover.reset_mock() _set_rolling_update_partition.reset_mock() - with tc.assertRaises(ClusterNotReadyError): + try: harness.charm.upgrade.pre_upgrade_check() + assert False + except ClusterNotReadyError: + pass _switchover.assert_called_once_with(unit_zero_name) _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() @@ -253,8 +261,11 @@ def test_pre_upgrade_check(harness): _switchover.reset_mock() _set_rolling_update_partition.reset_mock() _get_sync_standby_names.return_value = f'["{harness.charm.app.name}/2"]' - with tc.assertRaises(ClusterNotReadyError): + try: harness.charm.upgrade.pre_upgrade_check() + assert False + except ClusterNotReadyError: + pass _switchover.assert_not_called() _set_list_of_sync_standbys.assert_called_once() _set_rolling_update_partition.assert_not_called() @@ -273,9 +284,8 @@ def test_set_list_of_sync_standbys(harness): # Test when the there are less than 3 units in the cluster. harness.charm.upgrade._set_list_of_sync_standbys() - tc.assertNotIn( - "sync-standbys", - harness.get_relation_data(upgrade_relation_id, harness.charm.app), + assert "sync-standbys" not in harness.get_relation_data( + upgrade_relation_id, harness.charm.app ) # Test when the there are 3 units in the cluster. @@ -286,9 +296,9 @@ def test_set_list_of_sync_standbys(harness): upgrade_relation_id, "postgresql-k8s/2", {"state": "idle"} ) harness.charm.upgrade._set_list_of_sync_standbys() - tc.assertEqual( - harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], - '["postgresql-k8s/0"]', + assert ( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"] + == '["postgresql-k8s/0"]' ) # Test when the unit zero is already a sync-standby. @@ -299,16 +309,16 @@ def test_set_list_of_sync_standbys(harness): upgrade_relation_id, "postgresql-k8s/3", {"state": "idle"} ) harness.charm.upgrade._set_list_of_sync_standbys() - tc.assertEqual( - harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], - '["postgresql-k8s/0", "postgresql-k8s/1"]', + assert ( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"] + == '["postgresql-k8s/0", "postgresql-k8s/1"]' ) # Test when the unit zero is not a sync-standby yet. harness.charm.upgrade._set_list_of_sync_standbys() - tc.assertEqual( - harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], - '["postgresql-k8s/1", "postgresql-k8s/0"]', + assert ( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"] + == '["postgresql-k8s/1", "postgresql-k8s/0"]' ) @@ -326,13 +336,17 @@ def test_set_rolling_update_partition(harness): # Test an operation that failed due to lack of Juju's trust flag. _client.return_value.patch.reset_mock() _client.return_value.patch.side_effect = _FakeApiError(403) - with tc.assertRaises(KubernetesClientError) as exception: + try: harness.charm.upgrade._set_rolling_update_partition(2) - tc.assertEqual(exception.exception.cause, "`juju trust` needed") + assert False + except KubernetesClientError as exception: + assert exception.cause == "`juju trust` needed" # Test an operation that failed due to some other reason. 
_client.return_value.patch.reset_mock() _client.return_value.patch.side_effect = _FakeApiError - with tc.assertRaises(KubernetesClientError) as exception: + try: harness.charm.upgrade._set_rolling_update_partition(2) - tc.assertEqual(exception.exception.cause, "broken") + assert False + except KubernetesClientError as exception: + assert exception.cause == "broken" diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 1080e659e6..5bf893e663 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -2,21 +2,32 @@ # See LICENSE file for licensing details. import re -from unittest import TestCase -from utils import new_password - -# used for assert functions -tc = TestCase() +from utils import any_cpu_to_cores, any_memory_to_bytes, new_password def test_new_password(): # Test the password generation twice in order to check if we get different passwords and # that they meet the required criteria. first_password = new_password() - tc.assertEqual(len(first_password), 16) - tc.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", first_password)) + assert len(first_password) == 16 + assert re.fullmatch("[a-zA-Z0-9\b]{16}$", first_password) is not None second_password = new_password() - tc.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", second_password)) - tc.assertNotEqual(second_password, first_password) + assert re.fullmatch("[a-zA-Z0-9\b]{16}$", second_password) is not None + assert second_password != first_password + + +def test_any_memory_to_bytes(): + assert any_memory_to_bytes("1KI") == 1024 + + try: + any_memory_to_bytes("KI") + assert False + except ValueError as e: + assert str(e) == "Invalid memory definition in 'KI'" + + +def test_any_cpu_to_cores(): + assert any_cpu_to_cores("12") == 12 + assert any_cpu_to_cores("1000m") == 1 diff --git a/tox.ini b/tox.ini index 8a1b2c3ba3..43e7eaec32 100644 --- a/tox.ini +++ b/tox.ini @@ -68,6 +68,7 @@ commands = poetry run coverage run --source={[vars]src_path} \ -m pytest -v --tb native -s {posargs} {[vars]tests_path}/unit poetry run coverage report + poetry run coverage xml [testenv:integration] description = Run integration tests