[MISC] Update snap and renovate fixes #432
Changes from all commits
3dc464e
031f229
2bf92a1
a48c740
0d085f0
bdefebd
3033f6a
91c1f21
a6dfc0c
b8be2bf
d5521d6
a327439
7f45ebe
@@ -1390,7 +1390,11 @@ def push_tls_files_to_workload(self) -> bool:
         if cert is not None:
             self._patroni.render_file(f"{PATRONI_CONF_PATH}/{TLS_CERT_FILE}", cert, 0o600)
 
-        return self.update_config()
+        try:
+            return self.update_config()
+        except Exception:
+            logger.exception("TLS files failed to push. Error in config update")
+            return False
Comment on lines +1393 to +1397
We don't really check for exceptions or the return value of update_config.

Yeah... this is what we saw with @beliaev-maksim yesterday during a troubleshooting session. Thanks for the quick fix here!
 
     def _reboot_on_detached_storage(self, event: EventBase) -> None:
         """Reboot on detached storage.
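For context, the change above turns an unhandled exception from update_config into a logged failure and a False return value, instead of letting the hook error out. A minimal standalone sketch of the same pattern (the update callable here is a stand-in, not the charm's actual method):

import logging

logger = logging.getLogger(__name__)


def run_config_update(update) -> bool:
    """Run a config update, turning an unexpected exception into a False result."""
    try:
        return update()
    except Exception:
        # logger.exception logs at ERROR level and appends the full traceback.
        logger.exception("TLS files failed to push. Error in config update")
        return False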
@@ -92,11 +92,8 @@ async def cloud_configs(ops_test: OpsTest, github_secrets) -> None:
 
 @pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_backup(ops_test: OpsTest, cloud_configs: Tuple[Dict, Dict]) -> None:
+async def test_backup(ops_test: OpsTest, cloud_configs: Tuple[Dict, Dict], charm) -> None:
     """Build and deploy two units of PostgreSQL and then test the backup and restore actions."""
-    # Build the PostgreSQL charm.
-    charm = await ops_test.build_charm(".")
-
     # Deploy S3 Integrator and TLS Certificates Operator.
     await ops_test.model.deploy(S3_INTEGRATOR_APP_NAME)
     await ops_test.model.deploy(TLS_CERTIFICATES_APP_NAME, config=TLS_CONFIG, channel=TLS_CHANNEL)
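The new charm parameter is a pytest fixture that replaces the per-test ops_test.build_charm(".") call, so the charm is built once and reused across tests. The fixture definition is not part of this diff; a plausible sketch of what it could look like in conftest.py (the fixture scope and location are assumptions):

import pytest
from pytest_operator.plugin import OpsTest


@pytest.fixture(scope="module")
async def charm(ops_test: OpsTest):
    """Build the PostgreSQL charm once and share the built artifact path."""
    return await ops_test.build_charm(".")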
@@ -126,7 +123,7 @@ async def test_backup(ops_test: OpsTest, cloud_configs: Tuple[Dict, Dict]) -> None:
     await action.wait()
     async with ops_test.fast_forward(fast_interval="60s"):
         await ops_test.model.wait_for_idle(
-            apps=[database_app_name, S3_INTEGRATOR_APP_NAME], status="active", timeout=1200
+            apps=[database_app_name, S3_INTEGRATOR_APP_NAME], status="active", timeout=1500
         )
 
     primary = await get_primary(ops_test, f"{database_app_name}/0")
@@ -279,14 +276,14 @@ async def test_backup(ops_test: OpsTest, cloud_configs: Tuple[Dict, Dict]) -> None:
 
     # Remove the database app.
     await ops_test.model.remove_application(database_app_name, block_until_done=True)
 
     # Remove the TLS operator.
     await ops_test.model.remove_application(TLS_CERTIFICATES_APP_NAME, block_until_done=True)
 
 
 @pytest.mark.group(1)
-async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets) -> None:
+async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets, charm) -> None:
     """Test that is possible to restore a backup to another PostgreSQL cluster."""
-    charm = await ops_test.build_charm(".")
     previous_database_app_name = f"{DATABASE_APP_NAME}-gcp"
     database_app_name = f"new-{DATABASE_APP_NAME}"
     await ops_test.model.deploy(charm, application_name=previous_database_app_name)
@@ -355,12 +352,9 @@ async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets) -> None:
 
     # Wait for the restore to complete.
     async with ops_test.fast_forward():
-        await wait_for_idle_on_blocked(
-            ops_test,
-            database_app_name,
-            0,
-            S3_INTEGRATOR_APP_NAME,
-            ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
+        unit = ops_test.model.units.get(f"{database_app_name}/0")
+        await ops_test.model.block_until(
+            lambda: unit.workload_status_message == ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE
         )
Comment on lines +355 to 358
Model fails to idle with the new snap. I tried with a one-minute fast-forward interval, but it didn't help.
 
     # Check that the backup was correctly restored by having only the first created table.
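Since the model no longer settles reliably with the new snap, the test stops waiting for idle and instead polls only the unit's workload status message via python-libjuju's Model.block_until. A minimal sketch of that pattern against a connected model (the application name and timeout are illustrative, not taken from this diff):

from juju.model import Model


async def wait_for_blocked_message(model: Model, expected_message: str) -> None:
    """Block until unit 0 of the postgresql application reports the given message."""
    unit = model.units.get("postgresql/0")
    # block_until re-evaluates the predicate until it returns True;
    # it raises asyncio.TimeoutError if the optional timeout elapses first.
    await model.block_until(
        lambda: unit.workload_status_message == expected_message,
        timeout=1500,
    )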
@@ -402,12 +396,9 @@ async def test_invalid_config_and_recovery_after_fixing_it(
     )
     await action.wait()
     logger.info("waiting for the database charm to become blocked")
-    await wait_for_idle_on_blocked(
-        ops_test,
-        database_app_name,
-        0,
-        S3_INTEGRATOR_APP_NAME,
-        FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE,
+    unit = ops_test.model.units.get(f"{database_app_name}/0")
+    await ops_test.model.block_until(
+        lambda: unit.workload_status_message == FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE
     )
 
     # Provide valid backup configurations, but from another cluster repository.
@@ -421,12 +412,9 @@ async def test_invalid_config_and_recovery_after_fixing_it(
     )
     await action.wait()
     logger.info("waiting for the database charm to become blocked")
-    await wait_for_idle_on_blocked(
-        ops_test,
-        database_app_name,
-        0,
-        S3_INTEGRATOR_APP_NAME,
-        ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE,
+    unit = ops_test.model.units.get(f"{database_app_name}/0")
+    await ops_test.model.block_until(
+        lambda: unit.workload_status_message == ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE
     )
 
     # Provide valid backup configurations, with another path in the S3 bucket.
The backup tests fail on Juju 2.9.47: the charm cannot be redeployed with the same name.

This seems to be fixed with Juju 2.9.49, but there are issues with the subordinates test. I will update it in a separate PR.

I cannot find such fixes mentioned in the Juju 2.9.47+ release notes: https://discourse.charmhub.io/t/roadmap-releases/5064
Maybe... this is strange. Will ask the Juju team to improve the release notes, as 2.9.48 is missing there completely.