From 613df7b81f62dc3ab71b15990b875b47a4659182 Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone"
Date: Tue, 19 Mar 2024 22:34:57 +0000
Subject: [PATCH 1/4] Restore instance tests to #5172

Restore instance tests added in #4691 so that they work with the latest
updates in #5172. The tests now use the new `StorageManagerTestHarness`
from #5172 instead of the since-removed `FakeStorageManager`.

Additionally, the DNS server code from `sled-agent/src/fakes/nexus` is
used rather than the reimplementation in #4691. This shrinks the amount
of code added with these tests.
---
 sled-agent/src/instance.rs         | 632 +++++++++++++++++++++++++++++
 sled-agent/src/instance_manager.rs |   5 +
 sled-agent/src/vmm_reservoir.rs    |  14 +
 3 files changed, 651 insertions(+)

diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs
index 34cbe688be..dd50833f56 100644
--- a/sled-agent/src/instance.rs
+++ b/sled-agent/src/instance.rs
@@ -1519,3 +1519,635 @@ impl InstanceRunner {
         out
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::fakes::nexus::{FakeNexusServer, ServerContext};
+    use crate::vmm_reservoir::VmmReservoirManagerHandle;
+    use crate::zone_bundle::CleanupContext;
+    use camino_tempfile::Utf8TempDir;
+    use dns_server::TransientServer;
+    use dropshot::HttpServer;
+    use illumos_utils::dladm::MockDladm;
+    use illumos_utils::dladm::__mock_MockDladm::__create_vnic::Context as MockDladmCreateVnicContext;
+    use illumos_utils::dladm::__mock_MockDladm::__delete_vnic::Context as MockDladmDeleteVnicContext;
+    use illumos_utils::opte::params::DhcpConfig;
+    use illumos_utils::svc::__wait_for_service::Context as MockWaitForServiceContext;
+    use illumos_utils::zone::MockZones;
+    use illumos_utils::zone::__mock_MockZones::__boot::Context as MockZonesBootContext;
+    use illumos_utils::zone::__mock_MockZones::__id::Context as MockZonesIdContext;
+    use internal_dns::resolver::Resolver;
+    use omicron_common::api::external::{
+        ByteCount, Generation, Hostname, InstanceCpuCount, InstanceState,
+    };
+    use omicron_common::api::internal::nexus::InstanceProperties;
+    use omicron_common::FileKv;
+    use sled_storage::manager_test_harness::StorageManagerTestHarness;
+    use std::net::Ipv6Addr;
+    use std::str::FromStr;
+    use tokio::sync::watch::Receiver;
+    use tokio::time::timeout;
+
+    const TIMEOUT_DURATION: tokio::time::Duration =
+        tokio::time::Duration::from_secs(30);
+
+    #[derive(Default, Clone)]
+    enum ReceivedInstanceState {
+        #[default]
+        None,
+        InstancePut(SledInstanceState),
+    }
+
+    struct NexusServer {
+        observed_runtime_state:
+            tokio::sync::watch::Sender<ReceivedInstanceState>,
+    }
+    impl FakeNexusServer for NexusServer {
+        fn cpapi_instances_put(
+            &self,
+            _instance_id: Uuid,
+            new_runtime_state: SledInstanceState,
+        ) -> Result<(), omicron_common::api::external::Error> {
+            self.observed_runtime_state
+                .send(ReceivedInstanceState::InstancePut(new_runtime_state))
+                .map_err(|_| {
+                    omicron_common::api::external::Error::internal_error(
+                        "couldn't send SledInstanceState to test driver",
+                    )
+                })
+        }
+    }
+
+    struct FakeNexusParts {
+        nexus_client: NexusClientWithResolver,
+        _nexus_server: HttpServer<ServerContext>,
+        state_rx: Receiver<ReceivedInstanceState>,
+        _dns_server: TransientServer,
+    }
+
+    impl FakeNexusParts {
+        async fn new(log: &Logger) -> Self {
+            let (state_tx, state_rx) =
+                tokio::sync::watch::channel(ReceivedInstanceState::None);
+
+            let _nexus_server = crate::fakes::nexus::start_test_server(
+                log.new(o!("component" => "FakeNexusServer")),
+                Box::new(NexusServer { observed_runtime_state: state_tx }),
+            );
+
+            let _dns_server =
+                crate::fakes::nexus::start_dns_server(&log, &_nexus_server)
+                    .await;
+
+            let resolver = Arc::new(
+                Resolver::new_from_addrs(
+                    log.clone(),
+                    &[_dns_server.dns_server.local_address()],
+                )
+                .unwrap(),
+            );
+
+            let nexus_client =
+                NexusClientWithResolver::new_from_resolver_with_port(
+                    &log,
+                    resolver,
+                    _nexus_server.local_addr().port(),
+                );
+
+            Self { nexus_client, _nexus_server, state_rx, _dns_server }
+        }
+    }
+
+    fn mock_vnic_contexts(
+    ) -> (MockDladmCreateVnicContext, MockDladmDeleteVnicContext) {
+        let create_vnic_ctx = MockDladm::create_vnic_context();
+        let delete_vnic_ctx = MockDladm::delete_vnic_context();
+        create_vnic_ctx.expect().return_once(
+            |physical_link: &Etherstub, _, _, _, _| {
+                assert_eq!(&physical_link.0, "mystub");
+                Ok(())
+            },
+        );
+        delete_vnic_ctx.expect().returning(|_| Ok(()));
+        (create_vnic_ctx, delete_vnic_ctx)
+    }
+
+    // InstanceManager::ensure_state calls Instance::put_state(Running),
+    // which calls Instance::propolis_ensure,
+    // which spawns Instance::monitor_state_task,
+    // which calls cpapi_instances_put
+    // and calls Instance::setup_propolis_inner,
+    // which creates the zone (which isn't real in these tests, of course)
+    fn mock_zone_contexts(
+    ) -> (MockZonesBootContext, MockWaitForServiceContext, MockZonesIdContext)
+    {
+        let boot_ctx = MockZones::boot_context();
+        boot_ctx.expect().return_once(|_| Ok(()));
+        let wait_ctx = illumos_utils::svc::wait_for_service_context();
+        wait_ctx.expect().times(..).returning(|_, _, _| Ok(()));
+        let zone_id_ctx = MockZones::id_context();
+        zone_id_ctx.expect().times(..).returning(|_| Ok(Some(1)));
+        (boot_ctx, wait_ctx, zone_id_ctx)
+    }
+
+    // note the "mock" here is different from the vnic/zone contexts above.
+    // this is actually running code for a dropshot server from propolis.
+    // (might we want a locally-defined fake whose behavior we can control
+    // more directly from the test driver?)
+    // TODO: factor out, this is also in sled-agent-sim.
+    fn propolis_mock_server(
+        log: &Logger,
+    ) -> (HttpServer<Arc<propolis_mock_server::Context>>, PropolisClient) {
+        let propolis_bind_address =
+            SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 0); // allocate port
+        let dropshot_config = dropshot::ConfigDropshot {
+            bind_address: propolis_bind_address,
+            ..Default::default()
+        };
+        let propolis_log = log.new(o!("component" => "propolis-server-mock"));
+        let private =
+            Arc::new(propolis_mock_server::Context::new(propolis_log));
+        info!(log, "Starting mock propolis-server...");
+        let dropshot_log = log.new(o!("component" => "dropshot"));
+        let mock_api = propolis_mock_server::api();
+
+        let srv = dropshot::HttpServerStarter::new(
+            &dropshot_config,
+            mock_api,
+            private,
+            &dropshot_log,
+        )
+        .expect("couldn't create mock propolis-server")
+        .start();
+
+        let client = propolis_client::Client::new(&format!(
+            "http://{}",
+            srv.local_addr()
+        ));
+
+        (srv, client)
+    }
+
+    // make a FakeStorageManager with a "U2" upserted
+    async fn setup_storage_manager(log: &Logger) -> StorageManagerTestHarness {
+        let mut harness = StorageManagerTestHarness::new(log).await;
+        let raw_disks =
+            harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await;
+        harness.handle().key_manager_ready().await;
+        let config = harness.make_config(1, &raw_disks);
+        let _ = harness
+            .handle()
+            .omicron_physical_disks_ensure(config.clone())
+            .await
+            .expect("Ensuring disks should work after key manager is ready");
+        harness
+    }
+
+    async fn instance_struct(
+        log: &Logger,
+        propolis_addr: SocketAddr,
+        nexus_client_with_resolver: NexusClientWithResolver,
+        storage_handle: StorageHandle,
+        temp_dir: &String,
+    ) -> Instance {
+        let id = Uuid::new_v4();
+        let propolis_id = Uuid::new_v4();
+
+        let ticket = InstanceTicket::new_without_manager_for_test(id);
+
+        let initial_state =
+            fake_instance_initial_state(propolis_id, propolis_addr);
+
+        let services = fake_instance_manager_services(
+            log,
+            storage_handle,
+            nexus_client_with_resolver,
+            temp_dir,
+        );
+
+        let metadata = InstanceMetadata {
+            silo_id: Uuid::new_v4(),
+            project_id: Uuid::new_v4(),
+        };
+
+        Instance::new(
+            log.new(o!("component" => "Instance")),
+            id,
+            propolis_id,
+            ticket,
+            initial_state,
+            services,
+            metadata,
+        )
+        .unwrap()
+    }
+
+    fn fake_instance_initial_state(
+        propolis_id: Uuid,
+        propolis_addr: SocketAddr,
+    ) -> InstanceInitialState {
+        let hardware = InstanceHardware {
+            properties: InstanceProperties {
+                ncpus: InstanceCpuCount(1),
+                memory: ByteCount::from_gibibytes_u32(1),
+                hostname: Hostname::from_str("bert").unwrap(),
+            },
+            nics: vec![],
+            source_nat: SourceNatConfig {
+                ip: IpAddr::V6(Ipv6Addr::UNSPECIFIED),
+                first_port: 0,
+                last_port: 0,
+            },
+            ephemeral_ip: None,
+            floating_ips: vec![],
+            firewall_rules: vec![],
+            dhcp_config: DhcpConfig {
+                dns_servers: vec![],
+                host_domain: None,
+                search_domains: vec![],
+            },
+            disks: vec![],
+            cloud_init_bytes: None,
+        };
+
+        InstanceInitialState {
+            hardware,
+            instance_runtime: InstanceRuntimeState {
+                propolis_id: Some(propolis_id),
+                dst_propolis_id: None,
+                migration_id: None,
+                gen: Generation::new(),
+                time_updated: Default::default(),
+            },
+            vmm_runtime: VmmRuntimeState {
+                state: InstanceState::Creating,
+                gen: Generation::new(),
+                time_updated: Default::default(),
+            },
+            propolis_addr,
+        }
+    }
+
+    fn fake_instance_manager_services(
+        log: &Logger,
+        storage_handle: StorageHandle,
+        nexus_client_with_resolver: NexusClientWithResolver,
+        temp_dir: &String,
+    ) -> InstanceManagerServices {
+        let vnic_allocator =
+            VnicAllocator::new("Foo", Etherstub("mystub".to_string()));
+        let port_manager = PortManager::new(
+            log.new(o!("component" => "PortManager")),
+            Ipv6Addr::new(0xfd00, 0x1de, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01),
+        );
+
+        let cleanup_context = CleanupContext::default();
+        let zone_bundler = ZoneBundler::new(
+            log.new(o!("component" => "ZoneBundler")),
+            storage_handle.clone(),
+            cleanup_context,
+        );
+
+        InstanceManagerServices {
+            nexus_client: nexus_client_with_resolver,
+            vnic_allocator,
+            port_manager,
+            storage: storage_handle,
+            zone_bundler,
+            zone_builder_factory: ZoneBuilderFactory::fake(Some(temp_dir)),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_instance_create_events_normal() {
+        let logctx = omicron_test_utils::dev::test_setup_log(
+            "test_instance_create_events_normal",
+        );
+        let log = logctx.log.new(o!(FileKv));
+
+        let (propolis_server, _propolis_client) = propolis_mock_server(&log);
+        let propolis_addr = propolis_server.local_addr();
+
+        // automock'd things used during this test
+        let _mock_vnic_contexts = mock_vnic_contexts();
+        let _mock_zone_contexts = mock_zone_contexts();
+
+        let FakeNexusParts {
+            nexus_client,
+            mut state_rx,
+            _dns_server,
+            _nexus_server,
+        } = FakeNexusParts::new(&log).await;
+
+        let mut storage_harness = setup_storage_manager(&log).await;
+        let storage_handle = storage_harness.handle().clone();
+
+        let temp_guard = Utf8TempDir::new().unwrap();
+        let temp_dir = temp_guard.path().to_string();
+
+        let inst = timeout(
+            TIMEOUT_DURATION,
+            instance_struct(
+                &log,
+                propolis_addr,
+                nexus_client,
+                storage_handle,
+                &temp_dir,
+            ),
+        )
+        .await
+        .expect("timed out creating Instance struct");
+
+        let (put_tx, put_rx) = oneshot::channel();
+
+        // pretending we're InstanceManager::ensure_state, start our "instance"
+        // (backed by fakes and propolis_mock_server)
+        inst.put_state(put_tx, InstanceStateRequested::Running)
+            .await
+            .expect("failed to send Instance::put_state");
+
+        // even though we ignore this result at instance creation time in
+        // practice (to avoid request timeouts), in this test let's make sure
+        // it actually completes.
+        timeout(TIMEOUT_DURATION, put_rx)
+            .await
+            .expect("timed out waiting for Instance::put_state result")
+            .expect("failed to receive Instance::put_state result")
+            .expect("Instance::put_state failed");
+
+        timeout(
+            TIMEOUT_DURATION,
+            state_rx.wait_for(|maybe_state| match maybe_state {
+                ReceivedInstanceState::InstancePut(sled_inst_state) => {
+                    sled_inst_state.vmm_state.state == InstanceState::Running
+                }
+                _ => false,
+            }),
+        )
+        .await
+        .expect("timed out waiting for InstanceState::Running in FakeNexus")
+        .expect("failed to receive FakeNexus' InstanceState");
+
+        storage_harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    // tests around dropshot request timeouts during the blocking propolis setup
+    #[tokio::test]
+    async fn test_instance_create_timeout_while_starting_propolis() {
+        let logctx = omicron_test_utils::dev::test_setup_log(
+            "test_instance_create_timeout_while_starting_propolis",
+        );
+        let log = logctx.log.new(o!(FileKv));
+
+        // automock'd things used during this test
+        let _mock_vnic_contexts = mock_vnic_contexts();
+        let _mock_zone_contexts = mock_zone_contexts();
+
+        let FakeNexusParts {
+            nexus_client,
+            state_rx,
+            _dns_server,
+            _nexus_server,
+        } = FakeNexusParts::new(&log).await;
+
+        let mut storage_harness = setup_storage_manager(&logctx.log).await;
+        let storage_handle = storage_harness.handle().clone();
+
+        let temp_guard = Utf8TempDir::new().unwrap();
+        let temp_dir = temp_guard.path().to_string();
+
+        let inst = timeout(
+            TIMEOUT_DURATION,
+            instance_struct(
+                &log,
+                // we want to test propolis not ever coming up
+                SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0)),
+                nexus_client,
+                storage_handle,
+                &temp_dir,
+            ),
+        )
+        .await
+        .expect("timed out creating Instance struct");
+
+        let (put_tx, put_rx) = oneshot::channel();
+
+        tokio::time::pause();
+
+        // pretending we're InstanceManager::ensure_state, try in vain to start
+        // our "instance", but no propolis server is running
+        inst.put_state(put_tx, InstanceStateRequested::Running)
+            .await
+            .expect("failed to send Instance::put_state");
+
+        let timeout_fut = timeout(TIMEOUT_DURATION, put_rx);
+
+        tokio::time::advance(TIMEOUT_DURATION).await;
+
+        tokio::time::resume();
+
+        timeout_fut
+            .await
+            .expect_err("*should've* timed out waiting for Instance::put_state, but didn't?");
+
+        if let ReceivedInstanceState::InstancePut(SledInstanceState {
+            vmm_state: VmmRuntimeState { state: InstanceState::Running, .. },
+            ..
+        }) = state_rx.borrow().to_owned()
+        {
+            panic!("Nexus's InstanceState should never have reached running if zone creation timed out");
+        }
+
+        storage_harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_instance_create_timeout_while_creating_zone() {
+        let logctx = omicron_test_utils::dev::test_setup_log(
+            "test_instance_create_timeout_while_creating_zone",
+        );
+        let log = logctx.log.new(o!(FileKv));
+
+        // automock'd things used during this test
+        let _mock_vnic_contexts = mock_vnic_contexts();
+
+        let rt_handle = tokio::runtime::Handle::current();
+
+        // time out while booting zone, on purpose!
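+        // (the mocked boot below blocks on a sleep under the paused tokio
+        // clock for twice TIMEOUT_DURATION, so the zone can't finish booting
+        // before this test's put_state timeout fires)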
+        let boot_ctx = MockZones::boot_context();
+        boot_ctx.expect().return_once(move |_| {
+            rt_handle.block_on(tokio::time::sleep(TIMEOUT_DURATION * 2));
+            Ok(())
+        });
+        let wait_ctx = illumos_utils::svc::wait_for_service_context();
+        wait_ctx.expect().times(..).returning(|_, _, _| Ok(()));
+        let zone_id_ctx = MockZones::id_context();
+        zone_id_ctx.expect().times(..).returning(|_| Ok(Some(1)));
+
+        let FakeNexusParts {
+            nexus_client,
+            state_rx,
+            _dns_server,
+            _nexus_server,
+        } = FakeNexusParts::new(&log).await;
+
+        let mut storage_harness = setup_storage_manager(&logctx.log).await;
+        let storage_handle = storage_harness.handle().clone();
+
+        let temp_guard = Utf8TempDir::new().unwrap();
+        let temp_dir = temp_guard.path().to_string();
+
+        let inst = timeout(
+            TIMEOUT_DURATION,
+            instance_struct(
+                &log,
+                // isn't running because the "zone" never "boots"
+                SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 1, 0, 0)),
+                nexus_client,
+                storage_handle,
+                &temp_dir,
+            ),
+        )
+        .await
+        .expect("timed out creating Instance struct");
+
+        tokio::time::pause();
+
+        let (put_tx, put_rx) = oneshot::channel();
+
+        // pretending we're InstanceManager::ensure_state, try in vain to start
+        // our "instance", but the zone never finishes installing
+        inst.put_state(put_tx, InstanceStateRequested::Running)
+            .await
+            .expect("failed to send Instance::put_state");
+
+        let timeout_fut = timeout(TIMEOUT_DURATION, put_rx);
+
+        tokio::time::advance(TIMEOUT_DURATION * 2).await;
+
+        tokio::time::resume();
+
+        timeout_fut
+            .await
+            .expect_err("*should've* timed out waiting for Instance::put_state, but didn't?");
+
+        if let ReceivedInstanceState::InstancePut(SledInstanceState {
+            vmm_state: VmmRuntimeState { state: InstanceState::Running, .. },
+            ..
+        }) = state_rx.borrow().to_owned()
+        {
+            panic!("Nexus's InstanceState should never have reached running if zone creation timed out");
+        }
+
+        storage_harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+
+    #[tokio::test]
+    async fn test_instance_manager_creation() {
+        let logctx = omicron_test_utils::dev::test_setup_log(
+            "test_instance_manager_creation",
+        );
+        let log = logctx.log.new(o!(FileKv));
+
+        // automock'd things used during this test
+        let _mock_vnic_contexts = mock_vnic_contexts();
+        let _mock_zone_contexts = mock_zone_contexts();
+
+        let mut storage_harness = setup_storage_manager(&logctx.log).await;
+        let storage_handle = storage_harness.handle().clone();
+
+        let FakeNexusParts {
+            nexus_client,
+            mut state_rx,
+            _dns_server,
+            _nexus_server,
+        } = FakeNexusParts::new(&log).await;
+
+        let temp_guard = Utf8TempDir::new().unwrap();
+        let temp_dir = temp_guard.path().to_string();
+
+        let InstanceManagerServices {
+            nexus_client,
+            vnic_allocator: _,
+            port_manager,
+            storage,
+            zone_bundler,
+            zone_builder_factory,
+        } = fake_instance_manager_services(
+            &log,
+            storage_handle,
+            nexus_client,
+            &temp_dir,
+        );
+
+        let etherstub = Etherstub("mystub".to_string());
+
+        let vmm_reservoir_manager = VmmReservoirManagerHandle::stub_for_test();
+
+        let mgr = crate::instance_manager::InstanceManager::new(
+            logctx.log.new(o!("component" => "InstanceManager")),
+            nexus_client,
+            etherstub,
+            port_manager,
+            storage,
+            zone_bundler,
+            zone_builder_factory,
+            vmm_reservoir_manager,
+        )
+        .unwrap();
+
+        let (propolis_server, _propolis_client) =
+            propolis_mock_server(&logctx.log);
+        let propolis_addr = propolis_server.local_addr();
+
+        let instance_id = Uuid::new_v4();
+        let propolis_id = Uuid::new_v4();
+        let InstanceInitialState {
+            hardware,
+            instance_runtime,
+            vmm_runtime,
+            propolis_addr,
+        } = fake_instance_initial_state(propolis_id, propolis_addr);
+
+        let metadata = InstanceMetadata {
+            silo_id: Uuid::new_v4(),
+            project_id: Uuid::new_v4(),
+        };
+
+        mgr.ensure_registered(
+            instance_id,
+            propolis_id,
+            hardware,
+            instance_runtime,
+            vmm_runtime,
+            propolis_addr,
+            metadata,
+        )
+        .await
+        .unwrap();
+
+        mgr.ensure_state(instance_id, InstanceStateRequested::Running)
+            .await
+            .unwrap();
+
+        timeout(
+            TIMEOUT_DURATION,
+            state_rx.wait_for(|maybe_state| match maybe_state {
+                ReceivedInstanceState::InstancePut(sled_inst_state) => {
+                    sled_inst_state.vmm_state.state == InstanceState::Running
+                }
+                _ => false,
+            }),
+        )
+        .await
+        .expect("timed out waiting for InstanceState::Running in FakeNexus")
+        .expect("failed to receive FakeNexus' InstanceState");
+
+        storage_harness.cleanup().await;
+        logctx.cleanup_successful();
+    }
+}
diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs
index c33abaa523..2c9780b3ce 100644
--- a/sled-agent/src/instance_manager.rs
+++ b/sled-agent/src/instance_manager.rs
@@ -750,6 +750,11 @@ impl InstanceTicket {
         InstanceTicket { id, terminate_tx: Some(terminate_tx) }
     }
 
+    #[cfg(test)]
+    pub(crate) fn new_without_manager_for_test(id: Uuid) -> Self {
+        Self { id, terminate_tx: None }
+    }
+
     /// Idempotently removes this instance from the tracked set of
     /// instances. This acts as an "upcall" for instances to remove
     /// themselves after stopping.
diff --git a/sled-agent/src/vmm_reservoir.rs b/sled-agent/src/vmm_reservoir.rs
index d7b6b64ecf..d4cadcc3ec 100644
--- a/sled-agent/src/vmm_reservoir.rs
+++ b/sled-agent/src/vmm_reservoir.rs
@@ -119,6 +119,20 @@ impl VmmReservoirManagerHandle {
         }
         rx.await.map_err(|_| Error::ReplySenderDropped)?
     }
+
+    /// TODO: We should be able to run tests in VMs that can use the real VmmReservoir
+    #[cfg(test)]
+    pub fn stub_for_test() -> Self {
+        let (tx, _) = flume::bounded(1);
+        let (size_updated_tx, _) = broadcast::channel(1);
+        let _manager_handle = Arc::new(thread::spawn(|| {}));
+        Self {
+            reservoir_size: Arc::new(AtomicU64::new(0)),
+            tx,
+            size_updated_tx,
+            _manager_handle,
+        }
+    }
 }
 
 /// Manage the VMM reservoir in a background thread

From 67bd1fc93afbe5199dbf4494c5946fd3580d81b4 Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone"
Date: Wed, 20 Mar 2024 06:12:52 +0000
Subject: [PATCH 2/4] remove stale comment

---
 sled-agent/src/instance.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs
index dd50833f56..0515b8c829 100644
--- a/sled-agent/src/instance.rs
+++ b/sled-agent/src/instance.rs
@@ -1689,7 +1689,6 @@ mod tests {
         (srv, client)
     }
 
-    // make a FakeStorageManager with a "U2" upserted
     async fn setup_storage_manager(log: &Logger) -> StorageManagerTestHarness {
         let mut harness = StorageManagerTestHarness::new(log).await;
         let raw_disks =

From b7c1d67c7c215d6e231a2728e4d7b049e5706637 Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone"
Date: Wed, 20 Mar 2024 18:05:59 +0000
Subject: [PATCH 3/4] make tests illumos only

---
 sled-agent/src/instance.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs
index 0515b8c829..d016715591 100644
--- a/sled-agent/src/instance.rs
+++ b/sled-agent/src/instance.rs
@@ -1520,7 +1520,7 @@ impl InstanceRunner {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, target_os = "illumos"))]
 mod tests {
     use super::*;
     use crate::fakes::nexus::{FakeNexusServer, ServerContext};

From c51bf3e4049d10eba26d61b5cd0faa0608fe2755 Mon Sep 17 00:00:00 2001
From: "Andrew J. Stone"
Date: Wed, 20 Mar 2024 19:09:11 +0000
Subject: [PATCH 4/4] fix unused test functions

---
 sled-agent/src/instance_manager.rs | 2 +-
 sled-agent/src/vmm_reservoir.rs    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/sled-agent/src/instance_manager.rs b/sled-agent/src/instance_manager.rs
index 2c9780b3ce..cf6563b117 100644
--- a/sled-agent/src/instance_manager.rs
+++ b/sled-agent/src/instance_manager.rs
@@ -750,7 +750,7 @@ impl InstanceTicket {
         InstanceTicket { id, terminate_tx: Some(terminate_tx) }
     }
 
-    #[cfg(test)]
+    #[cfg(all(test, target_os = "illumos"))]
     pub(crate) fn new_without_manager_for_test(id: Uuid) -> Self {
         Self { id, terminate_tx: None }
     }
diff --git a/sled-agent/src/vmm_reservoir.rs b/sled-agent/src/vmm_reservoir.rs
index d4cadcc3ec..caa1d88254 100644
--- a/sled-agent/src/vmm_reservoir.rs
+++ b/sled-agent/src/vmm_reservoir.rs
@@ -121,7 +121,7 @@ impl VmmReservoirManagerHandle {
     }
 
     /// TODO: We should be able to run tests in VMs that can use the real VmmReservoir
-    #[cfg(test)]
+    #[cfg(all(test, target_os = "illumos"))]
     pub fn stub_for_test() -> Self {
         let (tx, _) = flume::bounded(1);
         let (size_updated_tx, _) = broadcast::channel(1);