From 7d5f967c087e494d38b60b3e4f4ac65de52135d6 Mon Sep 17 00:00:00 2001
From: Kevin Carter
Date: Thu, 25 Jan 2024 22:21:45 -0600
Subject: [PATCH] Rebase the nova values file

This change updates our nova values file so that it permits the use of
the updated nova chart. This will help address the issues we're seeing
in #38

Signed-off-by: Kevin Carter
---
 helm-configs/nova/nova-helm-overrides.yaml | 210 +++++++++++++--------
 1 file changed, 132 insertions(+), 78 deletions(-)

diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml
index a31a7aab..d6923c6c 100644
--- a/helm-configs/nova/nova-helm-overrides.yaml
+++ b/helm-configs/nova/nova-helm-overrides.yaml
@@ -75,8 +75,9 @@ images:
     nova_service_cleaner: "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal"
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     rabbit_init: docker.io/rabbitmq:3.7-management
-    nova_archive_deleted_rows: docker.io/openstackhelm/nova:wallaby-ubuntu_focal
+    nova_archive_deleted_rows: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
     nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
+    nova_storage_init: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'
     test: docker.io/xrally/xrally-openstack:2.0.0
     image_repo_sync: docker.io/docker:17.07.0
     nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
@@ -244,6 +245,13 @@ network:
       enabled: false
       port: 30680
   spiceproxy:
+    ingress:
+      public: true
+      classes:
+        namespace: "nginx"
+        cluster: "nginx-openstack"
+      annotations:
+        nginx.ingress.kubernetes.io/rewrite-target: /
     node_port:
       enabled: false
       port: 30682
@@ -272,10 +280,6 @@ dependencies:
       ovn:
         compute:
           pod: []
-          # - requireSameNode: true
-          #   labels:
-          #     application: ovn
-          #     component: ovn-controller
       openvswitch:
         compute:
           pod:
@@ -303,7 +307,6 @@ dependencies:
           - nova-db-sync
           - nova-ks-user
           - nova-ks-endpoints
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -316,7 +319,6 @@ dependencies:
           - nova-db-sync
           - nova-ks-user
           - nova-ks-endpoints
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -333,7 +335,6 @@ dependencies:
       cell_setup:
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -351,7 +352,6 @@ dependencies:
       service_cleaner:
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -369,7 +369,6 @@ dependencies:
             component: libvirt
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -384,7 +383,6 @@ dependencies:
       compute_ironic:
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -399,7 +397,6 @@ dependencies:
       conductor:
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -415,7 +412,6 @@ dependencies:
             service: oslo_db
       archive_deleted_rows:
         jobs:
-          # - nova-db-init
           - nova-db-sync
       db_init:
         services:
@@ -423,7 +419,6 @@ dependencies:
             service: oslo_db
       db_sync:
         jobs: []
-          # - nova-db-init
         services:
           - endpoint: internal
             service: oslo_db
@@ -460,7 +455,6 @@ dependencies:
       scheduler:
         jobs:
           - nova-db-sync
-          # - nova-rabbit-init
         services:
           - endpoint: internal
             service: oslo_messaging
@@ -492,23 +486,37 @@ console:
   spice:
     compute:
       # IF blank, search default routing interface
-      server_proxyclient_interface:
+      server_proxyclient_interface: null
+      # or set network cidr
+      server_proxyclient_network_cidr: 0/0
     proxy:
       # IF blank, search default routing interface
-      server_proxyclient_interface:
+      server_proxyclient_interface: null
+      # or set network cidr
+      server_proxyclient_network_cidr: 0/0
   novnc:
     compute:
       # IF blank, search default routing interface
-      vncserver_proxyclient_interface:
+      vncserver_proxyclient_interface: null
+      # or set network cidr
+      vncserver_proxyclient_network_cidr: 0/0
     vncproxy:
       # IF blank, search default routing interface
-      vncserver_proxyclient_interface:
+      vncserver_proxyclient_interface: null
+      # or set network cidr
+      vncserver_proxyclient_network_cidr: 0/0
   address_search_enabled: true

 ceph_client:
   configmap: ceph-etc
   user_secret_name: pvc-ceph-client-key
+rbd_pool:
+  app_name: nova-vms
+  replication: 3
+  crush_rule: replicated_rule
+  chunk_size: 8
+
 conf:
   security: |
     #
@@ -1343,11 +1351,16 @@ conf:
     # When "address_search_enabled", get the IP address to be used as the target for live migration
     # traffic using interface name.
     # If this option is set to None, the hostname of the migration target compute node will be used.
-    live_migration_interface:
+    live_migration_interface: null
+    # or set cidr
+    live_migration_network_cidr: 0/0
   hypervisor:
     address_search_enabled: true
     # my_ip can be set automatically through this interface name.
-    host_interface:
+    host_interface: null
+    # If host_interface is null there is a fallback mechanism to search
+    # for interface with routing using host network cidr.
+    host_network_cidr: 0/0
   # This list is the keys to exclude from the config file ingested by nova-compute
   nova_compute_redactions:
     - database
@@ -1548,7 +1561,7 @@ secrets:
   identity:
     admin: nova-keystone-admin
     nova: nova-keystone-user
-    # test: nova-keystone-test
+    test: nova-keystone-test
   oslo_db:
     admin: nova-db-admin
     nova: nova-db-user
@@ -1578,7 +1591,8 @@ secrets:
       internal: metadata-tls-metadata
     compute_spice_proxy:
       spiceproxy:
-        internal: nova-tls-spiceproxy
+        public: nova-spiceproxy-tls-public
+        internal: nova-spiceproxy-tls-proxy
   oci_image_registry:
     nova: nova-oci-image-registry
@@ -1762,14 +1776,14 @@ endpoints:
         project_name: service
         user_domain_name: service
         project_domain_name: service
-      # test:
-      #   role: admin
-      #   region_name: RegionOne
-      #   username: nova-test
-      #   password: password
-      #   project_name: test
-      #   user_domain_name: service
-      #   project_domain_name: service
+      test:
+        role: admin
+        region_name: RegionOne
+        username: nova-test
+        password: password
+        project_name: test
+        user_domain_name: service
+        project_domain_name: service
     hosts:
       default: keystone
       internal: keystone-api
@@ -1888,7 +1902,7 @@ endpoints:
     name: nova
     hosts:
       default: nova-spiceproxy
-      public: placement
+      public: spiceproxy
     host_fqdn_override:
       default: null
     path:
@@ -1898,6 +1912,7 @@ endpoints:
     port:
       spice_proxy:
         default: 6082
+        public: 6082
   placement:
     name: placement
     hosts:
@@ -1999,15 +2014,20 @@ pod:
         liveness:
           enabled: True
           params:
-            initialDelaySeconds: 120
             periodSeconds: 90
             timeoutSeconds: 70
         readiness:
           enabled: True
           params:
-            initialDelaySeconds: 80
             periodSeconds: 90
             timeoutSeconds: 70
+        startup:
+          enabled: True
+          params:
+            failureThreshold: 120
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 70
     api-metadata:
       default:
         liveness:
@@ -2319,135 +2339,166 @@ pod:
     enabled: false
     compute:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     compute_ironic:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     api_metadata:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     api:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     conductor:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     scheduler:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
+        cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     ssh:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
+        cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     novncproxy:
      requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     spiceproxy:
       requests:
-        memory: "64Mi"
+        memory: "128Mi"
         cpu: "100m"
       limits:
-        memory: "4096Mi"
+        memory: "1024Mi"
+        cpu: "2000m"
     jobs:
       bootstrap:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      storage_init:
+        requests:
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       db_init:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       rabbit_init:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       db_sync:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       archive_deleted_rows:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       db_drop:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       ks_endpoints:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       ks_service:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       ks_user:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       tests:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       cell_setup:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
      service_cleaner:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"
       image_repo_sync:
         requests:
-          memory: "64Mi"
+          memory: "128Mi"
           cpu: "100m"
         limits:
-          memory: "4096Mi"
+          memory: "1024Mi"
+          cpu: "2000m"

 network_policy:
   nova:
@@ -2486,8 +2537,10 @@ manifests:
   deployment_scheduler: true
   ingress_metadata: true
   ingress_novncproxy: true
+  ingress_spiceproxy: false
   ingress_osapi: true
   job_bootstrap: true
+  job_storage_init: false
   job_db_init: false
   job_db_sync: true
   job_db_drop: false
@@ -2510,6 +2563,7 @@ manifests:
   secret_registry: true
   service_ingress_metadata: true
   service_ingress_novncproxy: true
+  service_ingress_spiceproxy: false
   service_ingress_osapi: true
   service_metadata: true
   service_novncproxy: true