diff --git a/.github/workflows/run-tests.sh b/.github/workflows/run-tests.sh
index d1f9a7d7..71d1447e 100755
--- a/.github/workflows/run-tests.sh
+++ b/.github/workflows/run-tests.sh
@@ -25,43 +25,47 @@ for package in "${CHANGED_FILES[@]}"; do
   fi
 done
 
+function run_test() {
+  DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud HOST=$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":$1
+}
+
+# Run the basic functional end to end tests for the CDR recipe
+run_test "recipe"
+
 if [[ ${#changed_packages[@]} -eq 0 ]] || [[ "${!changed_packages[*]}" == *"utils"* ]] || [[ "${!changed_packages[*]}" == *"features/steps"* ]] || [[ "${!changed_packages[*]}" == *"infrastructure"* ]] ; then
-  openhim_ran="true"
-  DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":openhim
+  openhimRan="true"
+  run_test "openhim"
 else
   for folder_name in "${!changed_packages[@]}"; do
     echo "$folder_name was changed"
 
     if [[ $folder_name == *"clickhouse"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":clickhouse
+      run_test "clickhouse"
     elif [[ $folder_name == *"elastic"* ]] || [[ $folder_name == *"kibana"* ]] || [[ $folder_name == *"logstash"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":elk
+      run_test "elk"
     elif [[ $folder_name == *"kafka"* ]] || [[ $folder_name == *"monitoring"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":kafka
+      run_test "kafka"
     elif [[ $folder_name == *"openhim"* ]] && [[ $openhimRan == "false" ]]; then
       openhimRan="true"
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":openhim
+      run_test "openhim"
     elif [[ $folder_name == *"reverse-proxy"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":nginx
+      run_test "nginx"
     elif [[ $folder_name == *"hapi"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":hapi
+      run_test "hapi"
     elif [[ $folder_name == *"santempi"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":sante
+      run_test "sante"
     elif [[ $folder_name == *"monitoring"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":monitoring
+      run_test "monitoring"
    elif [[ $folder_name == *"keycloak"* ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":keycloak
+      run_test "keycloak"
    elif [[ $folder_name == *"superset"* ]] && [[ $NODE_MODE == "single" ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":superset
+      run_test "superset"
    elif [[ $folder_name == *"jsreport"* ]] && [[ $NODE_MODE == "single" ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":jsreport
+      run_test "jsreport"
    elif [[ $folder_name == *"mpi-mediator"* ]] && [[ $NODE_MODE == "single" ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":mpi-mediator
+      run_test "mpi-mediator"
    elif [[ $folder_name == *"jempi"* ]] && [[ $NODE_MODE == "single" ]]; then
-      DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":jempi
+      run_test "jempi"
    fi
  done
 fi
-
-# Run the basic funtional end to end tests for the CDR recipe
-DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud HOST=$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":recipe
diff --git a/analytics-datastore-clickhouse/docker-compose.cluster.yml b/analytics-datastore-clickhouse/docker-compose.cluster.yml
index f2c6b61b..4e970066 100644
--- a/analytics-datastore-clickhouse/docker-compose.cluster.yml
+++ b/analytics-datastore-clickhouse/docker-compose.cluster.yml
@@ -1,17 +1,17 @@
-version: '3.9'
+version: "3.9"
 
 services:
   analytics-datastore-clickhouse-01:
-    image: clickhouse/clickhouse-server
+    image: ${CLICKHOUSE_IMAGE}
     ulimits:
       noFile: 262144
     volumes:
       - clickhouse-data-01:/var/lib/clickhouse/
     hostname: analytics-datastore-clickhouse-01
-    deploy:
-      placement:
-        constraints:
-          - "node.labels.name==node-1"
+    deploy:
+      placement:
+        constraints:
+          - "node.labels.name==${ANALYTICS_DATASTORE_CLICKHOUSE_01_PLACEMENT}"
     configs:
       - target: /etc/clickhouse-server/config.d/docker_related_config.xml
         source: docker_related_config.xml
@@ -36,12 +36,12 @@ services:
       default:
 
   analytics-datastore-clickhouse-02:
-    image: clickhouse/clickhouse-server
+    image: ${CLICKHOUSE_IMAGE}
     hostname: analytics-datastore-clickhouse-02
-    deploy:
-      placement:
-        constraints:
-          - "node.labels.name==node-2"
+    deploy:
+      placement:
+        constraints:
+          - "node.labels.name==${ANALYTICS_DATASTORE_CLICKHOUSE_02_PLACEMENT}"
     ulimits:
       noFile: 262144
     volumes:
@@ -70,16 +70,16 @@ services:
       default:
 
   analytics-datastore-clickhouse-03:
-    image: clickhouse/clickhouse-server
+    image: ${CLICKHOUSE_IMAGE}
     hostname: analytics-datastore-clickhouse-03
-    deploy:
-      placement:
-        constraints:
-          - "node.labels.name==node-3"
+    deploy:
+      placement:
+        constraints:
+          - "node.labels.name==${ANALYTICS_DATASTORE_CLICKHOUSE_03_PLACEMENT}"
     ulimits:
       noFile: 262144
     volumes:
-      - clickhouse-data-03:/var/lib/clickhouse/
+      - clickhouse-data-03:/var/lib/clickhouse/
     configs:
       - target: /etc/clickhouse-server/config.d/docker_related_config.xml
         source: docker_related_config.xml
@@ -109,7 +109,7 @@ services:
     ulimits:
       noFile: 262144
     volumes:
-      - clickhouse-data-04:/var/lib/clickhouse/
+      - clickhouse-data-04:/var/lib/clickhouse/
     configs:
       - target: /etc/clickhouse-server/config.d/docker_related_config.xml
         source: docker_related_config.xml
@@ -136,77 +136,77 @@ volumes:
   clickhouse-data-02:
   clickhouse-data-03:
   clickhouse-data-04:
-
+
 configs:
   docker_related_config.xml:
     file: ./cluster_configs/docker_related_config.xml
     name: docker_related_config.xml-${docker_related_config_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_enable_keeper_01.xml:
     file: ./cluster_configs/enable_keeper_01.xml
     name: enable_keeper_01.xml-${enable_keeper_01_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_enable_keeper_02.xml:
     file: ./cluster_configs/enable_keeper_02.xml
     name: enable_keeper_02.xml-${enable_keeper_02_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_enable_keeper_03.xml:
     file: ./cluster_configs/enable_keeper_03.xml
     name: enable_keeper_03.xml-${enable_keeper_03_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_macros_01.xml:
     file: ./cluster_configs/macros_01.xml
     name: macros_01.xml-${macros_01_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_macros_02.xml:
     file: ./cluster_configs/macros_02.xml
     name: macros_02.xml-${macros_02_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_macros_03.xml:
     file: ./cluster_configs/macros_03.xml
     name: macros_03.xml-${macros_03_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_macros_04.xml:
     file: ./cluster_configs/macros_04.xml
     name: macros_04.xml-${macros_04_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_remote_servers.xml:
     file: ./cluster_configs/remote_servers.xml
     name: remote_servers.xml-${remote_servers_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_use_keeper.xml:
     file: ./cluster_configs/use_keeper.xml
     name: use_keeper.xml-${use_keeper_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_metric_log.xml:
     file: ./general_configs/metric_log.xml
     name: metric_log.xml.xml-${metric_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_part_log.xml:
     file: ./general_configs/part_log.xml
     name: part_log.xml.xml-${part_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_query_log.xml:
     file: ./general_configs/query_log.xml
     name: query_log.xml.xml-${query_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_trace_log.xml:
     file: ./general_configs/trace_log.xml
     name: trace_log.xml.xml-${trace_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
 
 networks:
diff --git a/analytics-datastore-clickhouse/docker-compose.yml b/analytics-datastore-clickhouse/docker-compose.yml
index ce6363a8..3f175532 100644
--- a/analytics-datastore-clickhouse/docker-compose.yml
+++ b/analytics-datastore-clickhouse/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   analytics-datastore-clickhouse:
-    image: clickhouse/clickhouse-server
+    image: ${CLICKHOUSE_IMAGE}
     ulimits:
       noFile: 262144
     volumes:
@@ -15,7 +15,7 @@ services:
       - target: /etc/clickhouse-server/config.d/query_log.xml
         source: clickhouse_query_log.xml
       - target: /etc/clickhouse-server/config.d/trace_log.xml
-        source: clickhouse_trace_log.xml
+        source: clickhouse_trace_log.xml
     networks:
       public:
       default:
@@ -27,22 +27,22 @@ configs:
   clickhouse_metric_log.xml:
     file: ./general_configs/metric_log.xml
     name: metric_log.xml.xml-${metric_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_part_log.xml:
     file: ./general_configs/part_log.xml
     name: part_log.xml.xml-${part_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_query_log.xml:
     file: ./general_configs/query_log.xml
     name: query_log.xml.xml-${query_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
   clickhouse_trace_log.xml:
     file: ./general_configs/trace_log.xml
     name: trace_log.xml.xml-${trace_log_xml_DIGEST:?err}
-    labels:
+    labels:
       name: clickhouse
 
 networks:
diff --git a/analytics-datastore-clickhouse/package-metadata.json b/analytics-datastore-clickhouse/package-metadata.json
index 680a3bb7..a7b0efb7 100644
--- a/analytics-datastore-clickhouse/package-metadata.json
+++ b/analytics-datastore-clickhouse/package-metadata.json
@@ -7,6 +7,10 @@
   "dependencies": [],
   "environmentVariables": {
     "CLICKHOUSE_HOST": "analytics-datastore-clickhouse",
-    "CLICKHOUSE_PORT": "8123"
+    "CLICKHOUSE_PORT": "8123",
+    "CLICKHOUSE_IMAGE": "clickhouse/clickhouse-server:23.8.14.6",
+    "ANALYTICS_DATASTORE_CLICKHOUSE_01_PLACEMENT": "node-1",
+    "ANALYTICS_DATASTORE_CLICKHOUSE_02_PLACEMENT": "node-2",
+    "ANALYTICS_DATASTORE_CLICKHOUSE_03_PLACEMENT": "node-3"
   }
 }
diff --git a/analytics-datastore-elastic-search/docker-compose.certs.yml b/analytics-datastore-elastic-search/docker-compose.certs.yml
index 699bc390..3798771f 100644
--- a/analytics-datastore-elastic-search/docker-compose.certs.yml
+++ b/analytics-datastore-elastic-search/docker-compose.certs.yml
@@ -2,7 +2,7 @@ version: "3.9"
 
 services:
   create_certs:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0
+    image: ${ES_IMAGE}
     deploy:
       placement:
         constraints:
diff --git a/analytics-datastore-elastic-search/docker-compose.cluster.yml b/analytics-datastore-elastic-search/docker-compose.cluster.yml
index 7b802498..a35aa21b 100644
--- a/analytics-datastore-elastic-search/docker-compose.cluster.yml
+++ b/analytics-datastore-elastic-search/docker-compose.cluster.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   analytics-datastore-elastic-search-01:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0
+    image: ${ES_IMAGE}
     ulimits:
       memlock:
         soft: -1
@@ -16,20 +16,20 @@ services:
           memory: ${ES_MEMORY_RESERVE}
       placement:
         constraints:
-          - "node.labels.name==node-1"
+          - "node.labels.name==${ES_01_PLACEMENT}"
     environment:
       node.name: es01
       cluster.name: es-cluster
       discovery.seed_hosts: analytics-datastore-elastic-search-02,analytics-datastore-elastic-search-03
       cluster.initial_master_nodes: es01
       xpack.license.self_generated.type: basic
-      bootstrap.memory_lock: 'true'
-      xpack.security.enabled: 'true'
-      xpack.security.http.ssl.enabled: 'false'
+      bootstrap.memory_lock: "true"
+      xpack.security.enabled: "true"
+      xpack.security.http.ssl.enabled: "false"
       xpack.security.http.ssl.key: certs/es01/es01.key
       xpack.security.http.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.http.ssl.certificate: certs/es01/es01.crt
-      xpack.security.transport.ssl.enabled: 'true'
+      xpack.security.transport.ssl.enabled: "true"
       xpack.security.transport.ssl.verification_mode: certificate
       xpack.security.transport.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.transport.ssl.certificate: certs/es01/es01.crt
@@ -52,7 +52,7 @@ services:
       public:
 
   analytics-datastore-elastic-search-02:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0
+    image: ${ES_IMAGE}
     ulimits:
       memlock:
         soft: -1
@@ -66,20 +66,20 @@ services:
           memory: ${ES_MEMORY_RESERVE}
       placement:
         constraints:
-          - "node.labels.name==node-2"
+          - "node.labels.name==${ES_02_PLACEMENT}"
     environment:
       node.name: es02
       cluster.name: es-cluster
       discovery.seed_hosts: analytics-datastore-elastic-search-01,analytics-datastore-elastic-search-03
       cluster.initial_master_nodes: es01
-      bootstrap.memory_lock: 'true'
-      xpack.security.enabled: 'true'
+      bootstrap.memory_lock: "true"
+      xpack.security.enabled: "true"
       xpack.license.self_generated.type: basic
-      xpack.security.http.ssl.enabled: 'false'
+      xpack.security.http.ssl.enabled: "false"
       xpack.security.http.ssl.key: certs/es02/es02.key
       xpack.security.http.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.http.ssl.certificate: certs/es02/es02.crt
-      xpack.security.transport.ssl.enabled: 'true'
+      xpack.security.transport.ssl.enabled: "true"
       xpack.security.transport.ssl.verification_mode: certificate
       xpack.security.transport.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.transport.ssl.certificate: certs/es02/es02.crt
@@ -102,7 +102,7 @@ services:
       public:
 
   analytics-datastore-elastic-search-03:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0
+    image: ${ES_IMAGE}
     ulimits:
       memlock:
         soft: -1
@@ -116,20 +116,20 @@ services:
           memory: ${ES_MEMORY_RESERVE}
       placement:
         constraints:
-          - "node.labels.name==node-3"
+          - "node.labels.name==${ES_03_PLACEMENT}"
     environment:
       node.name: es03
       cluster.name: es-cluster
       discovery.seed_hosts: analytics-datastore-elastic-search-01,analytics-datastore-elastic-search-02
       cluster.initial_master_nodes: es01
-      bootstrap.memory_lock: 'true'
-      xpack.security.enabled: 'true'
+      bootstrap.memory_lock: "true"
+      xpack.security.enabled: "true"
       xpack.license.self_generated.type: basic
-      xpack.security.http.ssl.enabled: 'false'
+      xpack.security.http.ssl.enabled: "false"
       xpack.security.http.ssl.key: certs/es03/es03.key
       xpack.security.http.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.http.ssl.certificate: certs/es03/es03.crt
-      xpack.security.transport.ssl.enabled: 'true'
+      xpack.security.transport.ssl.enabled: "true"
       xpack.security.transport.ssl.verification_mode: certificate
       xpack.security.transport.ssl.certificate_authorities: certs/ca/ca.crt
       xpack.security.transport.ssl.certificate: certs/es03/es03.crt
diff --git a/analytics-datastore-elastic-search/docker-compose.yml b/analytics-datastore-elastic-search/docker-compose.yml
index f6b86b78..e49b2916 100644
--- a/analytics-datastore-elastic-search/docker-compose.yml
+++ b/analytics-datastore-elastic-search/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   analytics-datastore-elastic-search:
-    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0
+    image: ${ES_IMAGE}
     ulimits:
       memlock:
         soft: -1
@@ -16,9 +16,9 @@ services:
     environment:
       node.name: es01
       discovery.type: single-node
-      bootstrap.memory_lock: 'true'
-      xpack.security.enabled: 'true'
-      xpack.monitoring.collection.enabled: 'true'
+      bootstrap.memory_lock: "true"
+      xpack.security.enabled: "true"
+      xpack.monitoring.collection.enabled: "true"
       ES_JAVA_OPTS: ${ES_HEAP_SIZE}
       search.max_buckets: 1000000
       search.default_search_timeout: -1
diff --git a/analytics-datastore-elastic-search/package-metadata.json b/analytics-datastore-elastic-search/package-metadata.json
index 0f305edb..facad20b 100644
--- a/analytics-datastore-elastic-search/package-metadata.json
+++ b/analytics-datastore-elastic-search/package-metadata.json
@@ -17,6 +17,10 @@
     "ES_HEAP_SIZE": "-Xms2048m -Xmx2048m",
     "ES_SSL": "false",
     "ES_MEMORY_LIMIT": "3G",
-    "ES_MEMORY_RESERVE": "500M"
+    "ES_MEMORY_RESERVE": "500M",
+    "ES_IMAGE": "docker.elastic.co/elasticsearch/elasticsearch:7.13.0",
+    "ES_01_PLACEMENT": "node-1",
+    "ES_02_PLACEMENT": "node-2",
+    "ES_03_PLACEMENT": "node-3"
   }
 }
diff --git a/dashboard-visualiser-jsreport/docker-compose.yml b/dashboard-visualiser-jsreport/docker-compose.yml
index 3d87f3c7..1d145e0f 100644
--- a/dashboard-visualiser-jsreport/docker-compose.yml
+++ b/dashboard-visualiser-jsreport/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   dashboard-visualiser-jsreport:
-    image: jsreport/jsreport:3.2.0
+    image: ${JS_REPORT_IMAGE}
     healthcheck:
       test: wget --no-verbose --tries=1 --spider -q http://localhost:5488/api/ping || exit 1
       interval: 10s
@@ -11,7 +11,7 @@ services:
       start_period: 10s
     deploy:
       placement:
-        max_replicas_per_node: 1
+        max_replicas_per_node: ${JS_REPORT_MAX_REPLICAS_PER_NODE}
       replicas: ${JS_REPORT_INSTANCES}
       resources:
         limits:
@@ -21,14 +21,14 @@ services:
           cpus: ${JS_REPORT_CPU_RESERVE}
           memory: ${JS_REPORT_MEMORY_RESERVE}
       labels:
-        co.elastic.metrics/module: 'docker'
-        co.elastic.metrics/metricsets: 'cpu,memory,diskio,info,healthcheck,container'
+        co.elastic.metrics/module: "docker"
+        co.elastic.metrics/metricsets: "cpu,memory,diskio,info,healthcheck,container"
     environment:
-      allowLocalFilesAccess: 'true'
-      extensions_fsStore_dataDirectory: 'jsreport/data'
-      extensions_fsStore_externalModificationsSync: 'true'
+      allowLocalFilesAccess: "true"
+      extensions_fsStore_dataDirectory: "jsreport/data"
+      extensions_fsStore_externalModificationsSync: "true"
       extensions_authentication_cookieSession_secret: ${JS_REPORT_SECRET}
-      extensions_authentication_admin_username: 'admin'
+      extensions_authentication_admin_username: "admin"
       extensions_authentication_admin_password: ${JS_REPORT}
       ES_PASSWORD: ${ES_ELASTIC}
       licenseKey: ${JS_REPORT_LICENSE_KEY}
diff --git a/dashboard-visualiser-jsreport/package-metadata.json b/dashboard-visualiser-jsreport/package-metadata.json
index 20e1ad78..a06074c6 100644
--- a/dashboard-visualiser-jsreport/package-metadata.json
+++ b/dashboard-visualiser-jsreport/package-metadata.json
@@ -6,7 +6,9 @@
   "version": "0.0.1",
   "dependencies": ["analytics-datastore-elastic-search"],
   "environmentVariables": {
+    "JS_REPORT_IMAGE": "jsreport/jsreport:3.2.0",
     "JS_REPORT_INSTANCES": "1",
+    "JS_REPORT_MAX_REPLICAS_PER_NODE": "1",
     "JS_REPORT_USERNAME": "admin",
     "JS_REPORT_SECRET": "dev_secret_only",
     "JS_REPORT": "dev_password_only",
diff --git a/dashboard-visualiser-kibana/docker-compose.yml b/dashboard-visualiser-kibana/docker-compose.yml
index 6064821d..30e3634f 100644
--- a/dashboard-visualiser-kibana/docker-compose.yml
+++ b/dashboard-visualiser-kibana/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   dashboard-visualiser-kibana:
-    image: docker.elastic.co/kibana/kibana:7.13.0
+    image: ${KIBANA_IMAGE}
     healthcheck:
       test: curl --fail http://localhost:5601 || exit 1
       interval: 10s
@@ -33,7 +33,7 @@ configs:
   kibana-kibana-cluster.yml:
     file: ./kibana.cluster.yml
     name: kibana-kibana-cluster.yml-${kibana_kibana_cluster_yml_DIGEST:?err}
-    labels:
+    labels:
       name: kibana
 
 networks:
diff --git a/dashboard-visualiser-kibana/package-metadata.json b/dashboard-visualiser-kibana/package-metadata.json
index 7a3bb05d..95d8b144 100644
--- a/dashboard-visualiser-kibana/package-metadata.json
+++ b/dashboard-visualiser-kibana/package-metadata.json
@@ -9,6 +9,7 @@
     "KIBANA_INSTANCES": 1,
     "ES_LEADER_NODE": "analytics-datastore-elastic-search",
     "ES_KIBANA_SYSTEM": "dev_password_only",
+    "KIBANA_IMAGE": "docker.elastic.co/kibana/kibana:7.13.0",
     "KIBANA_PASSWORD": "dev_password_only",
     "KIBANA_USERNAME": "elastic",
     "KIBANA_YML_CONFIG": "kibana-kibana.yml",
diff --git a/dashboard-visualiser-superset/docker-compose.postgres.cluster.yml b/dashboard-visualiser-superset/docker-compose.postgres.cluster.yml
index e4c6ff84..b6e31cbd 100644
--- a/dashboard-visualiser-superset/docker-compose.postgres.cluster.yml
+++ b/dashboard-visualiser-superset/docker-compose.postgres.cluster.yml
@@ -1,8 +1,8 @@
-version: '3.9'
+version: "3.9"
 
 services:
   postgres-metastore:
-    deploy:
-      placement:
-        constraints:
-          - "node.labels.name==node-2"
+    deploy:
+      placement:
+        constraints:
+          - "node.labels.name==${POSTGRES_METASTORE}"
diff --git a/dashboard-visualiser-superset/docker-compose.postgres.yml b/dashboard-visualiser-superset/docker-compose.postgres.yml
index cc4cceae..912914cd 100644
--- a/dashboard-visualiser-superset/docker-compose.postgres.yml
+++ b/dashboard-visualiser-superset/docker-compose.postgres.yml
@@ -2,7 +2,7 @@ version: "3.9"
 
 services:
   postgres-metastore:
-    image: postgres:16.2
+    image: ${SS_POSTGRES_IMAGE}
     environment:
       POSTGRES_USER: ${SUPERSET_POSTGRESQL_USERNAME}
       POSTGRES_PASSWORD: ${SUPERSET_POSTGRESQL_PASSWORD}
diff --git a/dashboard-visualiser-superset/package-metadata.json b/dashboard-visualiser-superset/package-metadata.json
index a5068d95..0d63ff8b 100644
--- a/dashboard-visualiser-superset/package-metadata.json
+++ b/dashboard-visualiser-superset/package-metadata.json
@@ -7,6 +7,8 @@
   "dependencies": ["analytics-datastore-clickhouse"],
   "environmentVariables": {
     "SUPERSET_IMAGE": "apache/superset:3.1.1",
+    "SS_POSTGRES_IMAGE": "postgres:16.2",
+    "POSTGRES_METASTORE": "node-2",
     "SUPERSET_ENABLED_FEATURE_FLAGS": "DASHBOARD_RBAC",
     "SUPERSET_USERNAME": "admin",
     "SUPERSET_FIRSTNAME": "SUPERSET",
diff --git a/database-postgres/docker-compose-pgpool.cluster.yml b/database-postgres/docker-compose-pgpool.cluster.yml
index 0addf043..2a23b101 100644
--- a/database-postgres/docker-compose-pgpool.cluster.yml
+++ b/database-postgres/docker-compose-pgpool.cluster.yml
@@ -1,12 +1,12 @@
-version: '3.9'
+version: "3.9"
 
 services:
   pgpool-1:
-    image: bitnami/pgpool:4.4.3
+    image: ${PG_POOL_IMAGE}
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-1"
+          - "node.labels.name==${PGPOOL_1_PLACEMENT}"
       replicas: 1
       resources:
         limits:
@@ -39,11 +39,11 @@ services:
       PGPOOL_USER_CONF_FILE: "/config/custom_pgpool.conf"
 
   pgpool-2:
-    image: bitnami/pgpool:4.4.3
+    image: ${PG_POOL_IMAGE}
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-2"
+          - "node.labels.name==${PGPOOL_2_PLACEMENT}"
       replicas: 1
       resources:
         limits:
@@ -76,11 +76,11 @@ services:
       PGPOOL_USER_CONF_FILE: "/config/custom_pgpool.conf"
 
   pgpool-3:
-    image: bitnami/pgpool:4.4.3
+    image: ${PG_POOL_IMAGE}
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-3"
+          - "node.labels.name==${PGPOOL_3_PLACEMENT}"
       replicas: 1
       resources:
         limits:
diff --git a/database-postgres/docker-compose-postgres.cluster.yml b/database-postgres/docker-compose-postgres.cluster.yml
index 88fab559..b7660c98 100644
--- a/database-postgres/docker-compose-postgres.cluster.yml
+++ b/database-postgres/docker-compose-postgres.cluster.yml
@@ -1,4 +1,4 @@
-version: '3.9'
+version: "3.9"
 
 services:
   postgres-1:
@@ -7,7 +7,7 @@ services:
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-1"
+          - "node.labels.name==${POSTGRES_1_PLACEMENT}"
 
   postgres-2:
     image: bitnami/postgresql-repmgr:14
@@ -23,14 +23,14 @@ services:
       REPMGR_FAILOVER: ${POSTGRES_FAILOVER}
       REPMGR_DEGRADED_MONITORING_TIMEOUT: ${POSTGRES_DEGRADED_MONITORING_TIMEOUT}
     volumes:
-      - 'hapi-postgres-2-data:/bitnami/postgresql'
+      - "hapi-postgres-2-data:/bitnami/postgresql"
     configs:
       - target: /bitnami/postgresql/conf/conf.d/custom_postgresql.conf
         source: postgresql.conf
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-2"
+          - "node.labels.name==${POSTGRES_2_PLACEMENT}"
       replicas: 1
       resources:
         limits:
@@ -58,14 +58,14 @@ services:
       REPMGR_FAILOVER: ${POSTGRES_FAILOVER}
       REPMGR_DEGRADED_MONITORING_TIMEOUT: ${POSTGRES_DEGRADED_MONITORING_TIMEOUT}
     volumes:
-      - 'hapi-postgres-3-data:/bitnami/postgresql'
+      - "hapi-postgres-3-data:/bitnami/postgresql"
     configs:
       - target: /bitnami/postgresql/conf/conf.d/custom_postgresql.conf
         source: postgresql.conf
     deploy:
       placement:
         constraints:
-          - "node.labels.name==node-3"
+          - "node.labels.name==${POSTGRES_3_PLACEMENT}"
       replicas: 1
       resources:
         limits:
diff --git a/database-postgres/docker-compose-postgres.yml b/database-postgres/docker-compose-postgres.yml
index 2a24c1ea..b5165127 100644
--- a/database-postgres/docker-compose-postgres.yml
+++ b/database-postgres/docker-compose-postgres.yml
@@ -2,7 +2,7 @@ version: "3.9"
 
 services:
   postgres-1:
-    image: bitnami/postgresql-repmgr:14
+    image: ${POSTGRES_IMAGE}
     environment:
       POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD}
       REPMGR_NODE_NETWORK_NAME: postgres-1
diff --git a/database-postgres/package-metadata.json b/database-postgres/package-metadata.json
index bc6c441b..c7240670 100644
--- a/database-postgres/package-metadata.json
+++ b/database-postgres/package-metadata.json
@@ -9,6 +9,14 @@
     "REPMGR_PRIMARY_HOST": "postgres-1",
     "REPMGR_PARTNER_NODES": "postgres-1",
     "REPMGR_PASSWORD": "instant101",
+    "POSTGRES_IMAGE": "bitnami/postgresql-repmgr:14",
+    "POSTGRES_1_PLACEMENT": "node-1",
+    "POSTGRES_2_PLACEMENT": "node-2",
+    "POSTGRES_3_PLACEMENT": "node-3",
+    "PG_POOL_IMAGE": "bitnami/pgpool:4.4.3",
+    "PGPOOL_1_PLACEMENT": "node-1",
+    "PGPOOL_2_PLACEMENT": "node-2",
+    "PGPOOL_3_PLACEMENT": "node-3",
"node-3", "POSTGRES_REPLICA_SET": "postgres-1:5432", "POSTGRES_CPU_LIMIT": "0", "POSTGRES_CPU_RESERVE": "0.05", diff --git a/monitoring/docker-compose.cluster.yml b/monitoring/docker-compose.cluster.yml index e627fa1c..87eb7d62 100644 --- a/monitoring/docker-compose.cluster.yml +++ b/monitoring/docker-compose.cluster.yml @@ -1,20 +1,20 @@ -version: '3.9' +version: "3.9" services: prometheus: deploy: placement: constraints: - - "node.labels.name==node-1" + - "node.labels.name==${PROMETHEUS_PLACEMENT}" replicas: 1 prometheus_backup: - image: prom/prometheus:v2.38.0 + image: ${PROMETHEUS_BACKUP_IMAGE} user: root deploy: placement: constraints: - - "node.labels.name!=node-1" + - "node.labels.name!=${PROMETHEUS_BACKUP_PLACEMENT}" replicas: 1 volumes: - prometheus_data_backup:/prometheus @@ -23,11 +23,11 @@ services: - target: /etc/prometheus/prometheus.yml source: prometheus.yml command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - - '--web.console.libraries=/etc/prometheus/console_libraries' - - '--web.console.templates=/etc/prometheus/consoles' - - '--web.enable-lifecycle' + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/etc/prometheus/console_libraries" + - "--web.console.templates=/etc/prometheus/consoles" + - "--web.enable-lifecycle" networks: public: default: @@ -36,23 +36,17 @@ services: deploy: placement: constraints: - - "node.labels.name==node-1" + - "node.labels.name==${MINIO_01_PLACEMENT}" minio-02: - image: quay.io/minio/minio:RELEASE.2022-10-24T18-35-07Z + image: ${MINIO_IMAGE} entrypoint: sh command: -c 'mkdir -p /data1/loki /data2/loki && minio server --console-address ":9001" http://minio-0{1...4}/data{1...2}' environment: MINIO_ROOT_USER: ${MO_SECURITY_ADMIN_USER} MINIO_ROOT_PASSWORD: ${MO_SECURITY_ADMIN_PASSWORD} healthcheck: - test: - [ - "CMD", - "curl", - "-f", - "http://localhost:9000/minio/health/live" - ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 @@ -63,24 +57,18 @@ services: deploy: placement: constraints: - - "node.labels.name==node-1" + - "node.labels.name==${MINIO_02_PLACEMENT}" replicas: 1 minio-03: - image: quay.io/minio/minio:RELEASE.2022-10-24T18-35-07Z + image: ${MINIO_IMAGE} entrypoint: sh command: -c 'mkdir -p /data1/loki /data2/loki && minio server --console-address ":9001" http://minio-0{1...4}/data{1...2}' environment: MINIO_ROOT_USER: ${MO_SECURITY_ADMIN_USER} MINIO_ROOT_PASSWORD: ${MO_SECURITY_ADMIN_PASSWORD} healthcheck: - test: - [ - "CMD", - "curl", - "-f", - "http://localhost:9000/minio/health/live" - ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 @@ -91,24 +79,18 @@ services: deploy: placement: constraints: - - "node.labels.name==node-2" + - "node.labels.name==${MINIO_03_PLACEMENT}" replicas: 1 minio-04: - image: quay.io/minio/minio:RELEASE.2022-10-24T18-35-07Z + image: ${MINIO_IMAGE} entrypoint: sh command: -c 'mkdir -p /data1/loki /data2/loki && minio server --console-address ":9001" http://minio-0{1...4}/data{1...2}' environment: MINIO_ROOT_USER: ${MO_SECURITY_ADMIN_USER} MINIO_ROOT_PASSWORD: ${MO_SECURITY_ADMIN_PASSWORD} healthcheck: - test: - [ - "CMD", - "curl", - "-f", - "http://localhost:9000/minio/health/live" - ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 @@ -119,7 +101,7 @@ services: deploy: placement: constraints: - - 
"node.labels.name==node-3" + - "node.labels.name==${MINIO_04_PLACEMENT}" replicas: 1 volumes: diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml index 0e1b34e0..1cf1db73 100644 --- a/monitoring/docker-compose.yml +++ b/monitoring/docker-compose.yml @@ -1,8 +1,8 @@ -version: '3.9' +version: "3.9" services: grafana: - image: grafana/grafana-oss:9.2.3 + image: ${GRAFANA_IMAGE} volumes: - grafana-data:/var/lib/grafana environment: @@ -71,12 +71,12 @@ services: - target: /etc/prometheus/prometheus.yml source: prometheus.yml command: - - '--config.file=/etc/prometheus/prometheus.yml' - - '--storage.tsdb.path=/prometheus' - - '--web.console.libraries=/etc/prometheus/console_libraries' - - '--web.console.templates=/etc/prometheus/consoles' - - '--web.enable-lifecycle' - - '--storage.tsdb.retention.time=${MO_RETENTION_TIME}' + - "--config.file=/etc/prometheus/prometheus.yml" + - "--storage.tsdb.path=/prometheus" + - "--web.console.libraries=/etc/prometheus/console_libraries" + - "--web.console.templates=/etc/prometheus/consoles" + - "--web.enable-lifecycle" + - "--storage.tsdb.retention.time=${MO_RETENTION_TIME}" networks: public: default: @@ -97,14 +97,14 @@ services: image: quay.io/prometheus/node-exporter:v1.3.1 hostname: "{{.Node.ID}}" command: - - '--path.rootfs=/host' + - "--path.rootfs=/host" volumes: - - '/:/host:ro,rslave' + - "/:/host:ro,rslave" deploy: mode: global loki: - image: grafana/loki:2.6.1 + image: ${LOKI_IMAGE} volumes: - loki-data:/tmp/loki environment: @@ -120,7 +120,7 @@ services: - prometheus-address=loki:3100 promtail: - image: grafana/promtail:2.6.1 + image: ${PROMTAIL_IMAGE} volumes: - /var/lib/docker/containers:/host/containers - /var/log:/var/log:ro @@ -139,13 +139,7 @@ services: MINIO_ROOT_USER: ${MO_SECURITY_ADMIN_USER} MINIO_ROOT_PASSWORD: ${MO_SECURITY_ADMIN_PASSWORD} healthcheck: - test: - [ - "CMD", - "curl", - "-f", - "http://localhost:9000/minio/health/live" - ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 diff --git a/monitoring/package-metadata.json b/monitoring/package-metadata.json index fd3b14e6..8d081fcd 100644 --- a/monitoring/package-metadata.json +++ b/monitoring/package-metadata.json @@ -6,6 +6,17 @@ "version": "0.0.1", "dependencies": [], "environmentVariables": { + "GRAFANA_IMAGE": "grafana/grafana-oss:9.2.3", + "LOKI_IMAGE": "grafana/loki:2.6.1", + "PROMTAIL_IMAGE": "grafana/promtail:2.6.1", + "PROMETHEUS_BACKUP_IMAGE": "prom/prometheus:v2.38.0", + "MINIO_IMAGE": "quay.io/minio/minio:RELEASE.2022-10-24T18-35-07Z", + "PROMETHEUS_PLACEMENT": "node-1", + "PROMETHEUS_BACKUP_PLACEMENT": "node-1", + "MINIO_01_PLACEMENT": "node-1", + "MINIO_02_PLACEMENT": "node-1", + "MINIO_03_PLACEMENT": "node-2", + "MINIO_04_PLACEMENT": "node-3", "GF_SECURITY_ADMIN_USER": "admin", "GF_SECURITY_ADMIN_PASSWORD": "dev_password_only", "GF_SMTP_ENABLED": "false",