diff --git a/nexus/db-model/src/vpc_router.rs b/nexus/db-model/src/vpc_router.rs
index ce8fa97364..9ceb6e3945 100644
--- a/nexus/db-model/src/vpc_router.rs
+++ b/nexus/db-model/src/vpc_router.rs
@@ -42,8 +42,8 @@ pub struct VpcRouter {
     #[diesel(embed)]
     identity: VpcRouterIdentity,
 
-    pub vpc_id: Uuid,
     pub kind: VpcRouterKind,
+    pub vpc_id: Uuid,
     pub rcgen: Generation,
     pub resolved_version: i64,
 }
diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs
index db8c6f12aa..957c0cb65d 100644
--- a/nexus/db-queries/src/db/datastore/vpc.rs
+++ b/nexus/db-queries/src/db/datastore/vpc.rs
@@ -1142,6 +1142,8 @@ impl DataStore {
 
         // Unlink all subnets from this router.
         // XXX: We might this want to error out before the delete fires.
+        // XXX: This will temporarily leave some hanging subnet attachments. We need to be sure
+        // these are safely handled (no unwrap, or remove via a join here).
         use db::schema::vpc_subnet::dsl as vpc;
         diesel::update(vpc::vpc_subnet)
             .filter(vpc::time_deleted.is_null())
diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs
index 2aef32d37c..57e1113a91 100644
--- a/nexus/test-utils/src/resource_helpers.rs
+++ b/nexus/test-utils/src/resource_helpers.rs
@@ -26,6 +26,7 @@ use nexus_types::external_api::views::FloatingIp;
 use nexus_types::external_api::views::IpPool;
 use nexus_types::external_api::views::IpPoolRange;
 use nexus_types::external_api::views::User;
+use nexus_types::external_api::views::VpcSubnet;
 use nexus_types::external_api::views::{Project, Silo, Vpc, VpcRouter};
 use nexus_types::identity::Resource;
 use nexus_types::internal_api::params as internal_params;
@@ -35,6 +36,8 @@ use omicron_common::api::external::Error;
 use omicron_common::api::external::IdentityMetadataCreateParams;
 use omicron_common::api::external::Instance;
 use omicron_common::api::external::InstanceCpuCount;
+use omicron_common::api::external::Ipv4Net;
+use omicron_common::api::external::Ipv6Net;
 use omicron_common::api::external::NameOrId;
 use omicron_common::disk::DiskIdentity;
 use omicron_sled_agent::sim::SledAgent;
@@ -559,6 +562,32 @@ pub async fn create_vpc_with_error(
         .unwrap()
 }
 
+pub async fn create_vpc_subnet(
+    client: &ClientTestContext,
+    project_name: &str,
+    vpc_name: &str,
+    subnet_name: &str,
+    ipv4_block: Ipv4Net,
+    ipv6_block: Option<Ipv6Net>,
+    custom_router: Option<&str>,
+) -> VpcSubnet {
+    object_create(
+        &client,
+        &format!("/v1/vpc-subnets?project={project_name}&vpc={vpc_name}"),
+        &params::VpcSubnetCreate {
+            identity: IdentityMetadataCreateParams {
+                name: subnet_name.parse().unwrap(),
+                description: "vpc description".to_string(),
+            },
+            ipv4_block,
+            ipv6_block,
+            custom_router: custom_router
+                .map(|n| NameOrId::Name(n.parse().unwrap())),
+        },
+    )
+    .await
+}
+
 pub async fn create_router(
     client: &ClientTestContext,
     project_name: &str,
diff --git a/nexus/tests/integration_tests/vpc_routers.rs b/nexus/tests/integration_tests/vpc_routers.rs
index 0b931efbd7..d59dad4afd 100644
--- a/nexus/tests/integration_tests/vpc_routers.rs
+++ b/nexus/tests/integration_tests/vpc_routers.rs
@@ -9,14 +9,20 @@ use nexus_test_utils::http_testing::NexusRequest;
 use nexus_test_utils::http_testing::RequestBuilder;
 use nexus_test_utils::identity_eq;
 use nexus_test_utils::resource_helpers::create_router;
+use nexus_test_utils::resource_helpers::create_vpc_subnet;
+use nexus_test_utils::resource_helpers::object_delete;
 use nexus_test_utils::resource_helpers::objects_list_page_authz;
 use nexus_test_utils::resource_helpers::{create_project, create_vpc};
+use nexus_test_utils::resource_helpers::{object_put, object_put_error};
 use nexus_test_utils_macros::nexus_test;
 use nexus_types::external_api::params;
+use nexus_types::external_api::params::VpcSubnetUpdate;
 use nexus_types::external_api::views::VpcRouter;
 use nexus_types::external_api::views::VpcRouterKind;
+use nexus_types::external_api::views::VpcSubnet;
 use omicron_common::api::external::IdentityMetadataCreateParams;
 use omicron_common::api::external::IdentityMetadataUpdateParams;
+use omicron_common::api::external::Ipv4Net;
 
 type ControlPlaneTestContext =
     nexus_test_utils::ControlPlaneTestContext<omicron_nexus::Server>;
@@ -253,6 +259,217 @@ async fn test_vpc_routers(cptestctx: &ControlPlaneTestContext) {
     assert_eq!(router_same_name.vpc_id, vpc2.identity.id);
 }
 
+#[nexus_test]
+async fn test_vpc_routers_attach_to_subnet(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    // XXX: really clean this up.
+    let client = &cptestctx.external_client;
+
+    // ---
+    // XX: copied from above
+    //
+
+    // Create a project that we'll use for testing.
+    // This includes the vpc 'default'.
+    let project_name = "springfield-squidport";
+    let _ = create_project(&client, project_name).await;
+    let vpc_name = "default";
+
+    let routers_url =
+        format!("/v1/vpc-routers?project={}&vpc={}", project_name, vpc_name);
+    let subnets_url =
+        format!("/v1/vpc-subnets?project={}&vpc={}", project_name, vpc_name);
+
+    // get routers should have only the system router created w/ the VPC
+    let routers =
+        objects_list_page_authz::<VpcRouter>(client, &routers_url).await.items;
+    assert_eq!(routers.len(), 1);
+    assert_eq!(routers[0].kind, VpcRouterKind::System);
+    //
+    // XX: copied from above
+    // ---
+
+    // Create a custom router for later use.
+    let router_name = "routy";
+    let router =
+        create_router(&client, project_name, vpc_name, router_name).await;
+    assert_eq!(router.kind, VpcRouterKind::Custom);
+
+    // Attaching a system router should fail.
+    let err = object_put_error(
+        client,
+        &format!(
+            "/v1/vpc-subnets/default?project={project_name}&vpc={vpc_name}"
+        ),
+        &VpcSubnetUpdate {
+            identity: IdentityMetadataUpdateParams {
+                name: None,
+                description: None,
+            },
+            custom_router: Some(routers[0].identity.id.into()),
+        },
+        StatusCode::BAD_REQUEST,
+    )
+    .await;
+    assert_eq!(err.message, "cannot attach a system router to a VPC subnet");
+
+    // Attaching a new custom router should succeed.
+    let default_subnet: VpcSubnet = object_put(
+        client,
+        &format!(
+            "/v1/vpc-subnets/default?project={project_name}&vpc={vpc_name}"
+        ),
+        &VpcSubnetUpdate {
+            identity: IdentityMetadataUpdateParams {
+                name: None,
+                description: None,
+            },
+            custom_router: Some(router.identity.id.into()),
+        },
+    )
+    .await;
+    assert_eq!(default_subnet.custom_router_id, Some(router.identity.id));
+
+    // Attaching a custom router to another subnet (same VPC) should succeed:
+    // ... at create time.
+    let subnet_name = "subnetty";
+    let subnet2 = create_vpc_subnet(
+        &client,
+        &project_name,
+        &vpc_name,
+        &subnet_name,
+        Ipv4Net("192.168.0.0/24".parse().unwrap()),
+        None,
+        Some(router_name),
+    )
+    .await;
+    assert_eq!(subnet2.custom_router_id, Some(router.identity.id));
+
+    // ... and via update.
+    let subnet_name = "subnettier";
+    let _ = create_vpc_subnet(
+        &client,
+        &project_name,
+        &vpc_name,
+        &subnet_name,
+        Ipv4Net("192.168.1.0/24".parse().unwrap()),
+        None,
+        None,
+    )
+    .await;
+
+    let subnet3: VpcSubnet = object_put(
+        client,
+        &format!(
+            "/v1/vpc-subnets/{subnet_name}?project={project_name}&vpc={vpc_name}",
+        ),
+        &VpcSubnetUpdate {
+            identity: IdentityMetadataUpdateParams {
+                name: None,
+                description: None,
+            },
+            custom_router: Some(router.identity.id.into()),
+        },
+    )
+    .await;
+
+    assert_eq!(subnet3.custom_router_id, Some(router.identity.id));
+
+    // Attaching a custom router to another VPC's subnet should fail.
+    create_vpc(&client, project_name, "vpc1").await;
+    let err = object_put_error(
+        client,
+        &format!("/v1/vpc-subnets/default?project={project_name}&vpc=vpc1"),
+        &VpcSubnetUpdate {
+            identity: IdentityMetadataUpdateParams {
+                name: None,
+                description: None,
+            },
+            custom_router: Some(router.identity.id.into()),
+        },
+        StatusCode::BAD_REQUEST,
+    )
+    .await;
+    assert_eq!(err.message, "router and subnet must belong to the same VPC");
+
+    // Deleting a custom router should detach from all these subnets.
+    object_delete(
+        &client,
+        &format!(
+            "/v1/vpc-routers/{router_name}?vpc={}&project={project_name}",
+            "default"
+        ),
+    )
+    .await;
+
+    for subnet in
+        objects_list_page_authz::<VpcSubnet>(client, &subnets_url).await.items
+    {
+        assert!(subnet.custom_router_id.is_none());
+    }
+}
+
+#[nexus_test]
+async fn test_vpc_routers_custom_route_at_instance(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let _client = &cptestctx.external_client;
+
+    // Attempting to delete a system router should fail.
+
+    // Attempting to add a new route to a system router should fail.
+
+    // Attempting to modify/delete a VPC subnet route should fail.
+
+    // Modifying the target of a Default (gateway) route should succeed.
+
+    todo!()
+}
+
+#[nexus_test]
+async fn test_vpc_routers_modify_system_routes(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let _client = &cptestctx.external_client;
+
+    // Attempting to delete a system router should fail.
+
+    // Attempting to add a new route to a system router should fail.
+
+    // Attempting to modify/delete a VPC subnet route should fail.
+
+    // Modifying the target of a Default (gateway) route should succeed.
+
+    todo!()
+}
+
+#[nexus_test]
+async fn test_vpc_routers_internet_gateway_target(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let _client = &cptestctx.external_client;
+
+    // Internet gateways are not fully supported: only 'inetgw:outbound'
+    // is a valid choice.
+
+    // This can be used in both system and custom routers.
+
+    todo!()
+}
+
+#[nexus_test]
+async fn test_vpc_routers_disallowed_custom_targets(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    let _client = &cptestctx.external_client;
+
+    // Neither 'vpc:xxx' nor 'subnet:xxx' can be specified as route targets
+    // in custom routers.
+
+    todo!()
+}
+
 fn routers_eq(sn1: &VpcRouter, sn2: &VpcRouter) {
     identity_eq(&sn1.identity, &sn2.identity);
     assert_eq!(sn1.vpc_id, sn2.vpc_id);
diff --git a/smf/sled-agent/non-gimlet/config-rss.toml b/smf/sled-agent/non-gimlet/config-rss.toml
index 595c53dc2f..d897f7ba4b 100644
--- a/smf/sled-agent/non-gimlet/config-rss.toml
+++ b/smf/sled-agent/non-gimlet/config-rss.toml
@@ -33,8 +33,7 @@ external_dns_zone_name = "oxide.test"
 # the DNS domain delegated to the rack by the customer. Each of these addresses
 # must be contained in one of the "internal services" IP Pool ranges listed
 # below.
-# external_dns_ips = [ "192.168.1.20", "192.168.1.21" ]
-external_dns_ips = [ "10.1.222.1", "10.1.222.2" ]
+external_dns_ips = [ "192.168.1.20", "192.168.1.21" ]
 
 # Initial TLS certificates for the external API
 #
@@ -70,10 +69,8 @@ external_certificates = []
 #
 # For more on this and what to put here, see docs/how-to-run.adoc.
 [[internal_services_ip_pool_ranges]]
-# first = "192.168.1.20"
-# last = "192.168.1.29"
-first = "10.1.222.1"
-last = "10.1.222.10"
+first = "192.168.1.20"
+last = "192.168.1.29"
 
 # TODO - this configuration is subject to change going forward. Ultimately these
 # parameters should be provided to the control plane via wicket, but we need to
@@ -94,10 +91,8 @@ rack_subnet = "fd00:1122:3344:0100::/56"
 # A range of IP addresses used by Boundary Services on the external network. In
 # a real system, these would be addresses of the uplink ports on the Sidecar.
 # With softnpu, only one address is used.
-# infra_ip_first = "192.168.1.30"
-# infra_ip_last = "192.168.1.30"
-infra_ip_first = "10.1.222.10"
-infra_ip_last = "10.1.222.10"
+infra_ip_first = "192.168.1.30"
+infra_ip_last = "192.168.1.30"
 
 # Configurations for BGP routers to run on the scrimlets.
 bgp = []
@@ -105,11 +100,9 @@ bgp = []
 # You can configure multiple uplinks by repeating the following stanza
 [[rack_network_config.ports]]
 # Routes associated with this port.
-# routes = [{nexthop = "192.168.1.199", destination = "0.0.0.0/0"}]
-routes = [{nexthop = "10.0.0.1", destination = "0.0.0.0/0"}]
+routes = [{nexthop = "192.168.1.199", destination = "0.0.0.0/0"}]
 # Addresses associated with this port.
-# addresses = ["192.168.1.30/24"]
-addresses = ["10.1.222.10/8"]
+addresses = ["192.168.1.30/24"]
 # Name of the uplink port. This should always be "qsfp0" when using softnpu.
 port = "qsfp0"
 # The speed of this port.
diff --git a/smf/sled-agent/non-gimlet/config.toml b/smf/sled-agent/non-gimlet/config.toml
index e1b1b91b06..77ca52a647 100644
--- a/smf/sled-agent/non-gimlet/config.toml
+++ b/smf/sled-agent/non-gimlet/config.toml
@@ -64,7 +64,7 @@ swap_device_size_gb = 64
 #
 # If empty, this will be equivalent to the first result from:
 # $ dladm show-phys -p -o LINK
-data_link = "rge0"
+# data_link = "igb0"
 
 # On a multi-sled system, transit-mode Maghemite runs in the `oxz_switch` zone
 # to configure routes between sleds. This runs over the Sidecar's rear ports
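
A note on the XXX comment added in nexus/db-queries/src/db/datastore/vpc.rs: the concern is that router deletion can briefly leave vpc_subnet rows whose custom_router_id points at a just-deleted router. Below is a minimal, self-contained sketch of one way to close that window: detach the subnets and soft-delete the router inside a single transaction. This is only an illustration in plain synchronous diesel; the table! definitions, function name, and connection type are assumptions that mirror the columns referenced in the hunk (custom_router_id, time_deleted), not the datastore's actual schema modules, async pool, or implementation in this PR.

use chrono::Utc;
use diesel::prelude::*;
use uuid::Uuid;

// Hypothetical minimal schema mirroring only the columns used above.
diesel::table! {
    vpc_subnet (id) {
        id -> Uuid,
        custom_router_id -> Nullable<Uuid>,
        time_deleted -> Nullable<Timestamptz>,
    }
}

diesel::table! {
    vpc_router (id) {
        id -> Uuid,
        time_deleted -> Nullable<Timestamptz>,
    }
}

/// Detach every live subnet from `router_id` and soft-delete the router,
/// all in one transaction so no reader observes a dangling
/// `custom_router_id`.
pub fn delete_router_and_detach_subnets(
    conn: &mut PgConnection,
    router_id: Uuid,
) -> QueryResult<()> {
    conn.transaction(|conn| {
        // Clear the attachment on every live subnet referencing the router.
        diesel::update(vpc_subnet::table)
            .filter(vpc_subnet::time_deleted.is_null())
            .filter(vpc_subnet::custom_router_id.eq(router_id))
            .set(vpc_subnet::custom_router_id.eq(Option::<Uuid>::None))
            .execute(conn)?;

        // Soft-delete the router itself; both writes commit or neither does.
        diesel::update(vpc_router::table)
            .filter(vpc_router::time_deleted.is_null())
            .filter(vpc_router::id.eq(router_id))
            .set(vpc_router::time_deleted.eq(Utc::now()))
            .execute(conn)?;

        Ok(())
    })
}

In the real datastore the same two updates would go through the existing db::schema dsl modules and the async connection; the point of the sketch is only the transactional grouping, which removes the window the XXX comment warns about.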