Commit

tests/unhealthy_database: Migrate from sync to async database pools (#8441)

All of the API endpoints in these tests are using the async database pools at this point, so waiting until the *sync* connection pools are healthy again isn't particularly helpful :D
Turbo87 authored Apr 10, 2024
1 parent 92ace94 commit cd48386
Showing 1 changed file with 22 additions and 21 deletions.
43 changes: 22 additions & 21 deletions src/tests/unhealthy_database.rs
@@ -1,9 +1,22 @@
 use crate::util::{RequestHelper, TestApp};
+use deadpool_diesel::postgres::Pool;
+use deadpool_diesel::Timeouts;
 use http::StatusCode;
 use std::time::Duration;
 
 const DB_HEALTHY_TIMEOUT: Duration = Duration::from_millis(2000);
 
+fn default_timeouts() -> Timeouts {
+    Timeouts::wait_millis(DB_HEALTHY_TIMEOUT.as_millis() as u64)
+}
+
+fn wait_until_healthy(pool: &Pool, app: &TestApp) {
+    let _ = app
+        .runtime()
+        .block_on(pool.timeout_get(&default_timeouts()))
+        .expect("the database did not return healthy");
+}
+
 #[test]
 fn http_error_with_unhealthy_database() {
     let (app, anon) = TestApp::init().with_chaos_proxy().empty();
@@ -17,10 +30,7 @@ fn http_error_with_unhealthy_database() {
     assert_eq!(response.status(), StatusCode::SERVICE_UNAVAILABLE);
 
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    app.as_inner()
-        .primary_database
-        .wait_until_healthy(DB_HEALTHY_TIMEOUT)
-        .expect("the database did not return healthy");
+    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
 
     let response = anon.get::<()>("/api/v1/summary");
     assert_eq!(response.status(), StatusCode::OK);
@@ -43,10 +53,7 @@ fn fallback_to_replica_returns_user_info() {
 
     // restore primary database connection
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    app.as_inner()
-        .primary_database
-        .wait_until_healthy(DB_HEALTHY_TIMEOUT)
-        .expect("the database did not return healthy");
+    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
 }
 
 #[test]
@@ -67,22 +74,19 @@ fn restored_replica_returns_user_info() {
 
     // Once the replica database is restored, it should serve as a fallback again
     app.replica_db_chaosproxy().restore_networking().unwrap();
-    app.as_inner()
-        .read_only_replica_database
+    let replica = app
+        .as_inner()
+        .deadpool_replica
         .as_ref()
-        .expect("no replica database configured")
-        .wait_until_healthy(DB_HEALTHY_TIMEOUT)
-        .expect("the database did not return healthy");
+        .expect("no replica database configured");
+    wait_until_healthy(replica, &app);
 
     let response = owner.get::<()>(URL);
     assert_eq!(response.status(), StatusCode::OK);
 
     // restore connection
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    app.as_inner()
-        .primary_database
-        .wait_until_healthy(DB_HEALTHY_TIMEOUT)
-        .expect("the database did not return healthy");
+    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
 }
 
 #[test]
@@ -103,10 +107,7 @@ fn restored_primary_returns_user_info() {
 
     // Once the replica database is restored, it should serve as a fallback again
     app.primary_db_chaosproxy().restore_networking().unwrap();
-    app.as_inner()
-        .primary_database
-        .wait_until_healthy(DB_HEALTHY_TIMEOUT)
-        .expect("the database did not return healthy");
+    wait_until_healthy(&app.as_inner().deadpool_primary, &app);
 
     let response = owner.get::<()>(URL);
     assert_eq!(response.status(), StatusCode::OK);
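For readers outside the crates.io test suite, the pattern the new wait_until_healthy helper relies on can be sketched in isolation: block a Tokio runtime on deadpool-diesel's timeout_get and treat a successful connection checkout as "healthy". This is only an illustrative sketch; the pool setup below (the connection URL, max_size, and building a dedicated runtime) is assumed for the example and is not part of this commit, which obtains both the runtime and the pools from TestApp.

use std::time::Duration;

use deadpool_diesel::postgres::{Manager, Pool};
use deadpool_diesel::{Runtime, Timeouts};

const DB_HEALTHY_TIMEOUT: Duration = Duration::from_millis(2000);

// Blocks until a connection can be checked out of the async pool, or panics
// once the wait timeout expires (the same idea as the helper in the diff).
fn wait_until_healthy(rt: &tokio::runtime::Runtime, pool: &Pool) {
    let timeouts = Timeouts::wait_millis(DB_HEALTHY_TIMEOUT.as_millis() as u64);
    let _conn = rt
        .block_on(pool.timeout_get(&timeouts))
        .expect("the database did not return healthy");
}

fn main() {
    // Hypothetical setup for the sketch; the real tests build neither of these.
    let rt = tokio::runtime::Runtime::new().unwrap();
    let manager = Manager::new("postgres://localhost/test_db", Runtime::Tokio1);
    let pool = Pool::builder(manager).max_size(1).build().unwrap();

    wait_until_healthy(&rt, &pool);
}

Checking out a connection (and immediately dropping it) is the whole health check: once the chaos proxy restores networking, the first successful checkout within the wait timeout means the async pool is usable again.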
