diff --git a/.env.cluster b/.env.cluster index 0c501267..1e4b52c6 100644 --- a/.env.cluster +++ b/.env.cluster @@ -82,7 +82,8 @@ GF_SERVER_DOMAIN=grafana.domain # Client Registry - JeMPI JEMPI_WEB_INSTANCES=3 -REACT_APP_JEMPI_BASE_URL=https://jempi-api.domain/JeMPI +REACT_APP_JEMPI_BASE_API_HOST=https://jempi-api.domain +REACT_APP_JEMPI_BASE_API_PORT=50000 JEMPI_SESSION_SECURE=true JEMPI_REPMGR_PARTNER_NODES=jempi-postgresql-01,jempi-postgresql-02,jempi-postgresql-03 JEMPI_ASYNC_RECEIVER_INSTANCES=1 diff --git a/.github/workflows/ci-cluster.yml b/.github/workflows/ci-cluster.yml index c3c3386d..0eae1e68 100644 --- a/.github/workflows/ci-cluster.yml +++ b/.github/workflows/ci-cluster.yml @@ -72,7 +72,7 @@ jobs: run-e2e-tests: runs-on: ubuntu-20.04 needs: configure-e2e-server - timeout-minutes: 80 + timeout-minutes: 120 steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index d58c35d2..1568ad93 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -9,25 +9,19 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v2 - - name: Set up Go - uses: actions/setup-go@v2 - with: - go-version: 1.17 + # replaces latest with the version of the release in the config.yaml file + - name: Set version + run: | + sed -i "s/latest/${{ github.event.release.tag_name }}/g" config.yaml - - name: Build Releases - run: ./get-cli.sh - - - name: Test binaries - run: ./instant-linux help - - - name: Release - uses: softprops/action-gh-release@v1 - with: - files: | - ./instant-linux - ./instant-macos - ./instant.exe - ./config.yml - ./banner.txt + - name: Release + uses: softprops/action-gh-release@v1 + with: + files: | + ./config.yml + ./banner.txt + ./cdr-dw.env + ./cdr.env + ./mpi.env diff --git a/.github/workflows/run-tests.sh b/.github/workflows/run-tests.sh index 2c6a5831..a144aa89 100755 --- a/.github/workflows/run-tests.sh +++ 
b/.github/workflows/run-tests.sh @@ -8,6 +8,9 @@ CHANGED_FILES=($@) cd ../../test/cucumber/ || exit +# This ensures that the openhim and its mediators' tests are run only once when the openhim and its mediators have all been modified +openhimRan="false" + declare -A changed_packages for package in "${CHANGED_FILES[@]}"; do if [[ $package == *"features/cluster-mode"* ]]; then @@ -28,12 +31,10 @@ elif [[ "${!changed_packages[*]}" == *"features/single-mode"* ]] && [[ $NODE_MOD DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:single elif [[ "${!changed_packages[*]}" == *"features/cluster-mode"* ]] && [[ $NODE_MODE == "cluster" ]]; then DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:cluster -elif [[ "${!changed_packages[*]}" == *"infrastructure"* ]]; then - DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE" +elif [[ "${!changed_packages[*]}" == *"infrastructure"* ]] && [[ $openhimRan == "false" ]]; then + openhimRan="true" + DOCKER_HOST=ssh://ubuntu@$GITHUB_RUN_ID.jembi.cloud yarn test:"$NODE_MODE":openhim else - # This ensures that the openhim and its mediators' tests are run only once when the openhim and its mediators have all been modified - openhimRan="false" - for folder_name in "${!changed_packages[@]}"; do echo "$folder_name was changed" diff --git a/.vscode/settings.json b/.vscode/settings.json index 3f6e69e1..8d187f9c 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -6,7 +6,7 @@ "editor.defaultFormatter": "esbenp.prettier-vscode" }, "editor.codeActionsOnSave": { - "source.fixAll": true + "source.fixAll": "explicit" }, "json.schemas": [ { diff --git a/Dockerfile b/Dockerfile index 44d3cef5..e33f100c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM openhie/package-base:2.1.3 +FROM openhie/package-base:2.2.0 # Install yq RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.23.1/yq_linux_amd64 -o /usr/bin/yq diff --git 
a/analytics-datastore-clickhouse/importer/config/clickhouseTables.js b/analytics-datastore-clickhouse/importer/config/clickhouseTables.js index 8c3ba4e7..3603beda 100644 --- a/analytics-datastore-clickhouse/importer/config/clickhouseTables.js +++ b/analytics-datastore-clickhouse/importer/config/clickhouseTables.js @@ -1,22 +1,52 @@ -const CLUSTERED_MODE = process.env.CLUSTERED_MODE || 'true'; +const CLUSTERED_MODE = process.env.CLUSTERED_MODE || "false"; const queries = - Boolean(CLUSTERED_MODE) === true + CLUSTERED_MODE === "true" ? [ - `CREATE TABLE default_table( - createdAt Date, - updatedAt Date - ) - ENGINE=MergeTree - ORDER BY tuple();`, + `CREATE TABLE patient_example ON CLUSTER '{cluster}' ( + id String, + version String NULL, + inserted_at DateTime DEFAULT now(), + last_updated Date NULL, + goldenId String, + patientGivenName String, + patientFamilyName String, + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/{table}', '{replica}') + ORDER BY tuple();`, + `CREATE TABLE observation_example ON CLUSTER '{cluster}' ( + id String, + version String NULL, + inserted_at DateTime DEFAULT now(), + last_updated Date NULL, + observationValue Double, + patientId String, + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/{table}', '{replica}') + ORDER BY tuple();`, ] - : [ - `CREATE TABLE default.default_table ON CLUSTER '{cluster}' ( - createdAt Date, - updatedAt Date - ) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/{table}', '{replica}') - ORDER BY tuple();`, + : [ + `CREATE TABLE patient_example( + id String, + version String NULL, + inserted_at DateTime DEFAULT now(), + last_updated Date NULL, + goldenId String, + patientGivenName String, + patientFamilyName String, + ) + ENGINE=MergeTree + ORDER BY tuple();`, + `CREATE TABLE observation_example( + id String, + version String NULL, + inserted_at DateTime DEFAULT now(), + last_updated Date NULL, + observationValue Double, + patientId String, + ) + 
ENGINE=MergeTree + ORDER BY tuple();`, ]; module.exports = queries; diff --git a/cdr-dw.env b/cdr-dw.env new file mode 100644 index 00000000..e9e3a6f5 --- /dev/null +++ b/cdr-dw.env @@ -0,0 +1,20 @@ +# General +CLUSTERED_MODE=false + +# Log +DEBUG=0 +BASHLOG_FILE=0 +BASHLOG_FILE_PATH=platform.log + +# Message Bus - Kafka +# !NOTE: Topics should comma seperated, optional include partion and repliction values +# e.g. :: -> test:3:2 (defaults to :3:1) +KAFKA_TOPICS=2xx,2xx-async,errors,reprocess,3xx,patient,observation,JeMPI-audit-trail + +# SSO +KC_OPENHIM_SSO_ENABLED=true +OPENHIM_CONSOLE_SHOW_LOGIN=false +KC_JEMPI_SSO_ENABLED=true +REACT_APP_JEMPI_BASE_API_PORT=50001 +KC_SUPERSET_SSO_ENABLED=true +KC_GRAFANA_SSO_ENABLED=true diff --git a/cdr.env b/cdr.env new file mode 100644 index 00000000..0c098b91 --- /dev/null +++ b/cdr.env @@ -0,0 +1,19 @@ +# General +CLUSTERED_MODE=false + +# Log +DEBUG=0 +BASHLOG_FILE=0 +BASHLOG_FILE_PATH=platform.log + +# Message Bus - Kafka +# !NOTE: Topics should comma seperated, optional include partion and repliction values +# e.g. 
:: -> test:3:2 (defaults to :3:1) +KAFKA_TOPICS=2xx,2xx-async,errors,reprocess,3xx,patient,observation,JeMPI-audit-trail + +# SSO +KC_OPENHIM_SSO_ENABLED=true +OPENHIM_CONSOLE_SHOW_LOGIN=false +KC_JEMPI_SSO_ENABLED=true +REACT_APP_JEMPI_BASE_API_PORT=50001 +KC_GRAFANA_SSO_ENABLED=true diff --git a/client-registry-jempi/README.md b/client-registry-jempi/README.md new file mode 100644 index 00000000..acc247ae --- /dev/null +++ b/client-registry-jempi/README.md @@ -0,0 +1,274 @@ + +# JeMPI Client Registry Component - docker-swarm + +This component consists of two services: + +* JeMPI Web UI - http://localhost:3033 +* JeMPI API - http://localhost:50000/JeMPI + +## Api endpoints + +> This service uses the openhim mapping mediator to map fhir formated patients into the JeMPI format + +### Registering a patient + +via the api (in JeMPI format) + +```sh +POST - http://localhost:50000/JeMPI/cr-register + +{ + "candidateThreshold": 0.9, + "sourceId": { + "facility": "fac1", + "patient": "pat1" + }, + "uniqueInteractionData": { + "auxDateCreated": "2016-10-30T14:22:25.285", + "auxId" : "1234", + "auxClinicalData" : "SOME DATA" + }, + "demographicData": { + "givenName": "XXX", + "familyName": "YYY", + "gender": "female", + "dob": "20000101", + "phoneNumber": "123456789", + "city": "Cape Town", + "nationalId": "1234567890" + } +} +``` + +via the [mapping mediator](https://github.com/jembi/openhim-mediator-mapping) (in fhir format) + +```sh + +POST http://localhost:3003/fhir/Patient + +The `candidateThreshold` can optionally be set in the request query. 
The default value is 0.9 + +{ + "resourceType": "Patient", + "gender": "male", + "birthDate": "1968-04-15", + "name": [ + { + "family": "cread", + "given": [ + "Jacess" + ] + } + ], + "address": [ + { + "city": "Indianapolis" + } + ], + "identifier": [ + { + "system": "https://instantopenhie.org/client1", + "value": "6b4573e7-f9dc-49ea-9ebb-daaa6b74a534" + }, + { + "value": "60934be6-ce88-48af-958e-02d88f77eec9", + "system": "NationalID" + } + ], + "telecom": [ + { + "value": "899-882-4991", + "system": "phone" + } + ] +} +``` +> The identifier with the system 'NationalID' maps to the 'nationalId' property in JeMPI + +## Querying a patient by id + +via the api (returns patient in JeMPI formated) + +```sh +GET - http://localhost:50000/JeMPI/expanded-golden-record/ +``` + +via the [mapping mediator](https://github.com/jembi/openhim-mediator-mapping) (returns patient in fhir format) + +```sh +GET - http://localhost:3003/fhir/Patient/ +``` + +## Updating a patient + +via the api (in JeMPI format) + +```sh +PATCH - http://localhost:50000/JeMPI/cr-update-fields + +{ + "goldenId": "0x5", + "fields": [ + { + "name": "givenName", + "value": "xxx" + }, + { + "name": "familyName", + "value": "yyy" + } + ] +} +``` + +via the [mapping mediator](https://github.com/jembi/openhim-mediator-mapping) (in fhir format) + +```sh +PUT - http://localhost:3003/fhir/Patient/ + +{ + "resourceType": "Patient", + "gender": "male", + "birthDate": "1968-04-15", + "name": [ + { + "family": "cread", + "given": [ + "Jacess" + ] + } + ], + "address": [ + { + "city": "Indianapolis" + } + ], + "identifier": [ + { + "system": "https://instantopenhie.org/client1", + "value": "6b4573e7-f9dc-49ea-9ebb-daaa6b74a534" + }, + { + "value": "60934be6-ce88-48af-958e-02d88f77eec9", + "system": "NationalID" + } + ], + "telecom": [ + { + "value": "899-882-4991", + "system": "phone" + } + ] +} +``` + +## Query all patients deterministic + +via the api (returns in JeMPI format) + +```sh +POST 
http://localhost:50000/JeMPI/cr-find + +{ + "operand": { + "fn": "eq", + "name": "givenName", + "value": "xxx" + }, + "operands": [ + { + "operator": "and", + "operand": { + "fn": "eq", + "name": "familyName", + "value": "yyy" + } + } + ] +} +``` + +via the [mapping mediator](https://github.com/jembi/openhim-mediator-mapping) (in fhir format) + +```sh +GET http://localhost:3003/fhir/Patient + +Query parameters - family, given, telecom, identifier, gender, birthDate, address (city) +``` + +## Query patients probabilistic + +via the api (in JeMPI format) + +```sh +POST - http://localhost:50000/JeMPI/cr-find + +{ + "operand": { + "fn": "match", + "name": "givenName", + "value": "drake", + "distance": 2 + }, + "operands": [ + { + "operator": "and", + "operand": { + "fn": "match", + "name": "familyName", + "value": "brake", + "distance": 2 + } + } + ] +} +``` + +via the [mapping mediator](https://github.com/jembi/openhim-mediator-mapping) (in fhir format) + +```sh + +POST http://localhost:3003/fhir/Patient/$match + +{ + "resourceType": "Parameters", + "parameter": [ + { + "name": "givenName", + "valueString": "drake", + "part": [ + { + "name": "operator", + "valueString": "and" + }, + { + "name": "fn", + "valueString": "match" + }, + { + "name": "distance", + "valueInteger": 2 + } + ] + }, + { + "name": "familyName", + "valueString": "brake", + "part": [ + { + "name": "operator", + "valueString": "and" + }, + { + "name": "fn", + "valueString": "match" + }, + { + "name": "distance", + "valueInteger": 2 + } + ] + } + ] +} +``` diff --git a/client-registry-jempi/docker-compose.api-dev.yml b/client-registry-jempi/docker-compose.api-dev.yml index 30be974e..eb7ea7f6 100644 --- a/client-registry-jempi/docker-compose.api-dev.yml +++ b/client-registry-jempi/docker-compose.api-dev.yml @@ -6,3 +6,9 @@ services: - published: 50000 target: 50000 mode: host + + jempi-api-kc: + ports: + - published: 50001 + target: 50000 + mode: host diff --git 
a/client-registry-jempi/docker-compose.api.yml b/client-registry-jempi/docker-compose.api.yml index 7f32f9db..e5a0da2b 100644 --- a/client-registry-jempi/docker-compose.api.yml +++ b/client-registry-jempi/docker-compose.api.yml @@ -4,17 +4,61 @@ services: jempi-api: image: jembi/jempi-api:${JEMPI_API_IMAGE_TAG} environment: + LOG4J2_LEVEL: ${LOG4J2_LEVEL} + POSTGRESQL_IP: ${JEMPI_REPMGR_PRIMARY_HOST} + POSTGRESQL_PORT: 5432 + POSTGRESQL_DATABASE: ${POSTGRESQL_DATABASE} + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_APPLICATION_ID: ${KAFKA_APPLICATION_ID_API} + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + API_HTTP_PORT: 50000 + LINKER_IP: jempi-linker + LINKER_HTTP_PORT: 50000 + volumes: + - "jempi-shared-data:/app/csv" + deploy: + replicas: ${JEMPI_API_INSTANCES} + resources: + limits: + memory: ${JEMPI_API_MEMORY_LIMIT} + reservations: + memory: ${JEMPI_API_MEMORY_RESERVE} + networks: + reverse-proxy: + kafka: + default: + + + jempi-api-kc: + image: jembi/jempi-api-kc:${JEMPI_API_KC_IMAGE_TAG} + environment: + LOG4J2_LEVEL: ${LOG4J2_LEVEL} KC_REALM_NAME: ${KC_REALM_NAME} + KC_API_URL: ${KC_API_URL} KC_JEMPI_CLIENT_ID: ${KC_JEMPI_CLIENT_ID} KC_JEMPI_CLIENT_SECRET: ${KC_JEMPI_CLIENT_SECRET} - KC_API_URL: ${KC_API_URL} KC_JEMPI_ROOT_URL: ${KC_JEMPI_ROOT_URL} + KC_FRONTEND_URL: ${KC_FRONTEND_URL} JEMPI_SESSION_SECRET: ${JEMPI_SESSION_SECRET} - kafka.bootstrap.servers: ${KAFKA_HOSTS} - JEMPI_FILE_IMPORT_MAX_SIZE_BYTE: ${JEMPI_FILE_IMPORT_MAX_SIZE_BYTE} JEMPI_SESSION_SECURE: ${JEMPI_SESSION_SECURE} - JEMPI_SESSION_DOMAIN_NAME: ${DOMAIN_NAME} - postgres.server: ${JEMPI_REPMGR_PARTNER_NODES} + JEMPI_SESSION_DOMAIN_NAME: ${JEMPI_SESSION_DOMAIN_NAME} + POSTGRESQL_IP: ${JEMPI_REPMGR_PRIMARY_HOST} + POSTGRESQL_PORT: 5432 + POSTGRESQL_DATABASE: ${POSTGRESQL_DATABASE} + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + 
KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_APPLICATION_ID: ${KAFKA_APPLICATION_ID_API} + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + API_KC_HTTP_PORT: 50000 + LINKER_IP: jempi-linker + LINKER_HTTP_PORT: 50000 + volumes: + - "jempi-shared-data:/app/csv" deploy: replicas: ${JEMPI_API_INSTANCES} resources: @@ -24,18 +68,18 @@ services: memory: ${JEMPI_API_MEMORY_RESERVE} networks: reverse-proxy: - keycloak: kafka: default: +volumes: + jempi-shared-data: + + networks: reverse-proxy: name: reverse-proxy_public external: true - keycloak: - name: keycloak_public - external: true kafka: name: kafka_public external: true diff --git a/client-registry-jempi/docker-compose.combined-dev.yml b/client-registry-jempi/docker-compose.combined-dev.yml index 1f33a0f1..80d14f9c 100644 --- a/client-registry-jempi/docker-compose.combined-dev.yml +++ b/client-registry-jempi/docker-compose.combined-dev.yml @@ -1,12 +1,6 @@ version: '3.9' services: - jempi-sync-receiver: - ports: - - published: 50040 - target: 50000 - protocol: tcp - mode: host jempi-controller: ports: diff --git a/client-registry-jempi/docker-compose.combined.yml b/client-registry-jempi/docker-compose.combined.yml index dd345eb8..02a239cf 100644 --- a/client-registry-jempi/docker-compose.combined.yml +++ b/client-registry-jempi/docker-compose.combined.yml @@ -4,7 +4,11 @@ services: jempi-async-receiver: image: jembi/jempi-async-receiver:${JEMPI_ASYNC_RECEIVER_IMAGE_TAG} environment: - kafka.bootstrap.servers: ${KAFKA_HOSTS} + LOG4J2_LEVEL: ${LOG4J2_LEVEL} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_CLIENT_ID: ${KAFKA_CLIENT_ID_ASYNC} + volumes: + - "jempi-shared-data:/app/csv" deploy: replicas: ${JEMPI_ASYNC_RECEIVER_INSTANCES} resources: @@ -16,40 +20,42 @@ services: kafka: default: - jempi-sync-receiver: - image: jembi/jempi-sync-receiver:${JEMPI_SYNC_RECEIVER_IMAGE_TAG} - environment: - kafka.bootstrap.servers: ${KAFKA_HOSTS} - deploy: - replicas: ${JEMPI_SYNC_RECEIVER_INSTANCES} - 
resources: - limits: - memory: ${JEMPI_SYNC_RECEIVER_MEMORY_LIMIT} - reservations: - memory: ${JEMPI_SYNC_RECEIVER_MEMORY_RESERVE} - networks: - kafka: - default: - jempi-pre-processor: - image: jembi/jempi-pre-processor:${JEMPI_PRE_PROCESSOR_IMAGE_TAG} + jempi-etl: + image: jembi/jempi-etl:${JEMPI_ETL_IMAGE_TAG} environment: - kafka.bootstrap.servers: ${KAFKA_HOSTS} + LOG4J2_LEVEL: ${LOG4J2_LEVEL} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_APPLICATION_ID: ${KAFKA_APPLICATION_ID_ETL} deploy: - replicas: ${JEMPI_PRE_PROCESSOR_INSTANCES} + replicas: ${JEMPI_ETL_INSTANCES} resources: limits: - memory: ${JEMPI_PRE_PROCESSOR_MEMORY_LIMIT} + memory: ${JEMPI_ETL_MEMORY_LIMIT} reservations: - memory: ${JEMPI_PRE_PROCESSOR_MEMORY_RESERVE} + memory: ${JEMPI_ETL_MEMORY_RESERVE} networks: kafka: default: + jempi-controller: image: jembi/jempi-controller:${JEMPI_CONTROLLER_IMAGE_TAG} environment: - kafka.bootstrap.servers: ${KAFKA_HOSTS} + LOG4J2_LEVEL: ${LOG4J2_LEVEL} + POSTGRESQL_IP: ${JEMPI_REPMGR_PRIMARY_HOST} + POSTGRESQL_PORT: 5432 + POSTGRESQL_DATABASE: ${POSTGRESQL_DATABASE} + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_APPLICATION_ID: ${KAFKA_APPLICATION_ID_CTRL} + KAFKA_CLIENT_ID: ${KAFKA_CLIENT_ID_CTRL} + CONTROLLER_HTTP_PORT: 50000 + API_IP: jempi-api + API_HTTP_PORT: 50000 + LINKER_IP: jempi-linker + LINKER_HTTP_PORT: 50000 deploy: replicas: ${JEMPI_CONTROLLER_INSTANCES} resources: @@ -61,25 +67,27 @@ services: kafka: default: - jempi-em-calculator: - image: jembi/jempi-em-calculator:${JEMPI_EM_CALCULATOR_IMAGE_TAG} - environment: - kafka.bootstrap.servers: ${KAFKA_HOSTS} - deploy: - replicas: ${JEMPI_EM_CALCULATOR_INSTANCES} - resources: - limits: - memory: ${JEMPI_EM_CALCULATOR_MEMORY_LIMIT} - reservations: - memory: ${JEMPI_EM_CALCULATOR_MEMORY_RESERVE} - networks: - kafka: - default: jempi-linker: image: jembi/jempi-linker:${JEMPI_LINKER_IMAGE_TAG} environment: - 
kafka.bootstrap.servers: ${KAFKA_HOSTS} + LOG4J2_LEVEL: ${LOG4J2_LEVEL} + POSTGRESQL_IP: ${JEMPI_REPMGR_PRIMARY_HOST} + POSTGRESQL_PORT: 5432 + POSTGRESQL_DATABASE: ${POSTGRESQL_DATABASE} + POSTGRESQL_USER: ${POSTGRESQL_USERNAME} + POSTGRESQL_PASSWORD: ${POSTGRESQL_PASSWORD} + KAFKA_BOOTSTRAP_SERVERS: ${KAFKA_HOSTS} + KAFKA_APPLICATION_ID_INTERACTIONS: ${KAFKA_APPLICATION_ID_INTERACTIONS} + KAFKA_APPLICATION_ID_MU: ${KAFKA_APPLICATION_ID_MU} + KAFKA_CLIENT_ID_NOTIFICATIONS: ${KAFKA_CLIENT_ID_NOTIFICATIONS} + DGRAPH_HOSTS: ${DGRAPH_HOSTS} + DGRAPH_PORTS: ${DGRAPH_PORTS} + LINKER_HTTP_PORT: 50000 + API_IP: jempi-api + API_HTTP_PORT: 50000 + LINKER_MATCH_THRESHOLD: ${LINKER_MATCH_THRESHOLD} + LINKER_MATCH_THRESHOLD_MARGIN: ${LINKER_MATCH_THRESHOLD_MARGIN} deploy: replicas: ${JEMPI_LINKER_INSTANCES} resources: @@ -91,6 +99,7 @@ services: kafka: default: + jempi-postgresql-01: image: bitnami/postgresql-repmgr:15.2.0 environment: @@ -117,9 +126,11 @@ services: configs: - target: /docker-entrypoint-initdb.d/jempi_psql_init_db.sql source: jempi_psql_init_db.sql - + volumes: jempi-psql-01-data: + jempi-shared-data: + networks: kafka: @@ -127,6 +138,7 @@ networks: external: true defualt: + configs: jempi_psql_init_db.sql: file: ./importer/jempi_psql_init_db.sql diff --git a/client-registry-jempi/docker-compose.web.yml b/client-registry-jempi/docker-compose.web.yml index 53a7b5ba..f1b5e092 100644 --- a/client-registry-jempi/docker-compose.web.yml +++ b/client-registry-jempi/docker-compose.web.yml @@ -4,12 +4,15 @@ services: jempi-web: image: jembi/jempi-web:${JEMPI_WEB_VERSION} environment: - REACT_APP_JEMPI_BASE_URL: ${REACT_APP_JEMPI_BASE_URL} - REACT_APP_MOCK_BACKEND: ${REACT_APP_MOCK_BACKEND} - REACT_APP_KC_FRONTEND_URL: ${KC_FRONTEND_URL} - REACT_APP_KC_REALM_NAME: ${KC_REALM_NAME} - REACT_APP_KC_JEMPI_CLIENT_ID: ${KC_JEMPI_CLIENT_ID} - command: sh -c "yarn build && serve -s build" + REACT_APP_JEMPI_BASE_API_HOST: ${REACT_APP_JEMPI_BASE_API_HOST} + 
REACT_APP_JEMPI_BASE_API_PORT: ${REACT_APP_JEMPI_BASE_API_PORT} + REACT_APP_ENABLE_SSO: ${KC_JEMPI_SSO_ENABLED} + REACT_APP_MAX_UPLOAD_CSV_SIZE_IN_MEGABYTES: 128 + NODE_ENV: production + KC_FRONTEND_URL: ${KC_FRONTEND_URL} + KC_REALM_NAME: ${KC_REALM_NAME} + KC_JEMPI_CLIENT_ID: ${KC_JEMPI_CLIENT_ID} + REACT_APP_SHOW_BRAND_LOGO: "false" deploy: replicas: ${JEMPI_WEB_INSTANCES} placement: @@ -24,6 +27,7 @@ services: keycloak: default: + networks: reverse-proxy: name: reverse-proxy_public diff --git a/client-registry-jempi/importer/jempi_psql_init_db.sql b/client-registry-jempi/importer/jempi_psql_init_db.sql index 705ec1a1..34b2cd61 100644 --- a/client-registry-jempi/importer/jempi_psql_init_db.sql +++ b/client-registry-jempi/importer/jempi_psql_init_db.sql @@ -19,13 +19,15 @@ CREATE TABLE IF NOT EXISTS Notification_State CREATE TABLE IF NOT EXISTS Notification ( Id uuid DEFAULT gen_random_uuid() PRIMARY KEY, - Type_Id uuid, + Type VARCHAR(50), Created date, Reviewd_By uuid, Reviewed_At timestamp without time zone, - State_Id uuid, + State VARCHAR(50), Patient_Id VARCHAR(50), - Names VARCHAR(100) + Names VARCHAR(100), + Golden_Id VARCHAR(50), + Score Numeric ); CREATE TABLE IF NOT EXISTS Action diff --git a/client-registry-jempi/importer/mapping-mediator/docker-compose.config.yml b/client-registry-jempi/importer/mapping-mediator/docker-compose.config.yml new file mode 100644 index 00000000..361e16e9 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/docker-compose.config.yml @@ -0,0 +1,104 @@ +version: '3.9' + +services: + mapping-mediator-config-importer: + image: jembi/instantohie-config-importer + deploy: + restart_policy: + condition: none + configs: + - source: mediator-config-metadata.js + target: /metadata.js + - source: mediator-config-register.json + target: /register.json + - source: mediator-config-register-response.json + target: /register-response.json + - source: mediator-config-update-response.json + target: /update-response.json + - 
source: mediator-config-update.json + target: /update.json + - source: mediator-config-search.json + target: /search.json + - source: mediator-config-searchAll.json + target: /searchAll.json + - source: mediator-config-patientLinks.json + target: /patientLinks.json + - source: mediator-config-patientLinksResponse.json + target: /patientLinksResponse.json + - source: mediator-config-searchAllResponse.json + target: /searchAllResponse.json + - source: mediator-config-searchAllProbabilistic.json + target: /searchAllProbabilistic.json + - source: mediator-config-searchAllProbabilisticResponse.json + target: /searchAllProbabilisticResponse.json + networks: + mapping-mediator: + # This command will only attempt to import the config when the uptime responds with a 2xx + command: sh -c "wait-on -t 60000 http-get://openhim-mapping-mediator:3003/uptime && node /metadata.js" + +configs: + mediator-config-metadata.js: + file: ./metadata.js + name: mediator-config-metadata.js-${mediator_config_metadata_js_DIGEST:?err} + labels: + name: jempi + mediator-config-register.json: + file: ./register.json + name: mediator-config-register.json-${mediator_config_register_json_DIGEST:?err} + labels: + name: jempi + mediator-config-register-response.json: + file: ./register-response.json + name: mediator-config-register-response.json-${mediator_config_register_response_json_DIGEST:?err} + labels: + name: jempi + mediator-config-update-response.json: + file: ./update-response.json + name: mediator-config-update-response.json-${mediator_config_update_response_json_DIGEST:?err} + labels: + name: jempi + mediator-config-update.json: + file: ./update.json + name: mediator-config-update.json-${mediator_config_update_json_DIGEST:?err} + labels: + name: jempi + mediator-config-search.json: + file: ./search.json + name: mediator-config-search.json-${mediator_config_search_json_DIGEST:?err} + labels: + name: jempi + mediator-config-searchAll.json: + file: ./searchAll.json + name: 
mediator-config-searchAll.json-${mediator_config_searchAll_json_DIGEST:?err} + labels: + name: jempi + mediator-config-patientLinks.json: + file: ./patientLinks.json + name: mediator-config-patientLinks.json-${mediator_config_patientLinks_json_DIGEST:?err} + labels: + name: jempi + mediator-config-patientLinksResponse.json: + file: ./patientLinksResponse.json + name: mediator-config-patientLinksResponse.json-${mediator_config_patientLinksResponse_json_DIGEST:?err} + labels: + name: jempi + mediator-config-searchAllResponse.json: + file: ./searchAllResponse.json + name: mediator-config-searchAllResponse.json-${mediator_config_searchAllResponse_json_DIGEST:?err} + labels: + name: jempi + mediator-config-searchAllProbabilistic.json: + file: ./searchAllProbabilistic.json + name: mediator-config-searchAllProbabilistic.json-${mediator_config_searchAllProbabilistic_json_DIGEST:?err} + labels: + name: jempi + mediator-config-searchAllProbabilisticResponse.json: + file: ./searchAllProbabilisticResponse.json + name: mediator-config-searchAllProbabilisticResponse.json-${mediator_config_searchAllProbabilisticResponse_json_DIGEST:?err} + labels: + name: jempi + +networks: + mapping-mediator: + name: openhim_mapping_mediator_public + external: true diff --git a/client-registry-jempi/importer/mapping-mediator/metadata.js b/client-registry-jempi/importer/mapping-mediator/metadata.js new file mode 100644 index 00000000..a04ba686 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/metadata.js @@ -0,0 +1,92 @@ +'use strict' + +const axios = require('axios') +const fs = require('fs') +const path = require('path') + +const MEDIATOR_HOSTNAME = process.env.MEDIATOR_HOST_NAME || 'openhim-mapping-mediator' +const MEDIATOR_API_PORT = process.env.MEDIATOR_API_PORT || 3003 + +// Function for sending the configuration +const sendRequest = async (data, method, endpointId) => { + const url = endpointId + ? 
`http://${MEDIATOR_HOSTNAME}:${MEDIATOR_API_PORT}/endpoints/${endpointId}` + : `http://${MEDIATOR_HOSTNAME}:${MEDIATOR_API_PORT}/endpoints` + + const options = { + url: url, + method: method, + headers: { + 'Content-Type': 'application/json' + }, + data: JSON.stringify(data) + } + + try { + const response = await axios(options) + + console.log( + `Successfully Imported OpenHIM Mediator Config.\n\nImport summary:${JSON.stringify( + response.data + )}` + ) + } catch (error) { + throw new Error( + `Failed to import OpenHIM Mediator config: ${error.message}` + ) + } +} + +const getEndpoints = async (callback) => { + const options = { + url: `http://${MEDIATOR_HOSTNAME}:${MEDIATOR_API_PORT}/endpoints`, + method: 'get', + headers: { + 'Content-Type': 'application/json' + } + } + + try { + const response = await axios(options) + callback(null, response.data) + } catch (error) { + callback( + new Error( + `Failed to fetch OpenHIM Mediator Mapping endpoints: ${error.response.data}` + ) + ) + } +} + +const importMetaData = async () => { + // get the endpoints list incase we need to do a update instead of a create + // If the endpoint already exists, perform an update + getEndpoints((_error, endpoints) => { + const dirPath = path.resolve(__dirname) + const files = fs.readdirSync(dirPath) + files.reduce((_acc, curr) => { + const jsonRegex = /(.*?(\bjson\b)[^$]*)/ + + if (curr.match(jsonRegex)) { + let method = 'post' + let endpointId = null + + const jsonData = JSON.parse( + fs.readFileSync(path.join(dirPath, curr), 'utf8') + ) + const matchingEndpoint = endpoints.filter( + (endpoint) => endpoint.endpoint.pattern === jsonData.endpoint.pattern + )[0] + + if (matchingEndpoint) { + endpointId = matchingEndpoint._id + method = 'put' + } + + sendRequest(jsonData, method, endpointId) + } + }, {}) + }) +} + +importMetaData() diff --git a/client-registry-jempi/importer/mapping-mediator/patientLinks.json b/client-registry-jempi/importer/mapping-mediator/patientLinks.json new file mode 
100644 index 00000000..19afa55c --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/patientLinks.json @@ -0,0 +1,59 @@ +{ + "name": "Get patient links", + "endpoint": { + "pattern": "/fhir/links/Patient/:patientId", + "method": "GET" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputTransforms": { + "id": "lookupRequests.jempiSearch.data.goldenRecord.uid", + "gender": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.gender) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.gender : null", + "birthDate": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.dob) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.dob : null", + "name": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.givenName) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.givenName : null", + "lastName": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.familyName) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.familyName : null", + "city": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.city) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.city : null", + "identifierValue": "$exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId) and $exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId.patient) ? lookupRequests.jempiSearch.data.goldenRecord.sourceId.patient : null", + "identifierSystem": "$exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId) and $exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId.facility) ? lookupRequests.jempiSearch.data.goldenRecord.sourceId.facility : null", + "nationalId": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.nationalId) ? 
{'value': lookupRequests.jempiSearch.data.goldenRecord.demographicData.nationalId, 'system': constants.nidSystem} : null", + "phoneNumber": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.phoneNumber) ? {'value': lookupRequests.jempiSearch.data.goldenRecord.demographicData.phoneNumber, 'system': constants.phone} : null", + "link": "$append([], $map(lookupRequests.jempiSearch.data.interactionsWithScore, function($v) {{'other': {'reference': 'Patient/' & $v.interaction.uid}, 'type': 'refer'}}))" + }, + "inputMapping": { + "constants.resourceType": "resourceType", + "transforms.id": "id", + "transforms.gender": "gender", + "transforms.birthDate": "birthDate", + "transforms.lastName": "name[0].family", + "transforms.name": "name[0].given[0]", + "transforms.city": "address[0].city", + "transforms.identifierSystem": "identifier[0].system", + "transforms.identifierValue": "identifier[0].value", + "transforms.nationalId": "identifier[]+", + "transforms.phoneNumber": "telecom[]+", + "transforms.link": "link" + }, + "constants": { + "nidSystem": "NationalID", + "resourceType": "Patient", + "phone": "phone" + }, + "requests": { + "lookup": { + "id": "jempiSearch", + "config": { + "method": "get", + "url": "http://openhim-mapping-mediator:3003/response/links/:patientId", + "params": { + "url": { + "patientId": { + "path": "urlParams.patientId" + } + } + } + } + } + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/patientLinksResponse.json b/client-registry-jempi/importer/mapping-mediator/patientLinksResponse.json new file mode 100644 index 00000000..7ffd8cb8 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/patientLinksResponse.json @@ -0,0 +1,44 @@ +{ + "name": "Get patient links response", + "endpoint": { + "pattern": "/response/links/:patientId", + "method": "GET" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputTransforms": { + "goldenId": 
"$exists(lookupRequests.jempiSearchGoldenId.data[0].goldenRecordsWithScore[0].goldenRecord.uid) ? lookupRequests.jempiSearchGoldenId.data[0].goldenRecordsWithScore[0].goldenRecord.uid : ''" + }, + "requests": { + "response": { + "id": "jempiSearchResponse", + "config": { + "method": "get", + "url": "http://jempi-api:50000/JeMPI/expanded-golden-record/:goldenId", + "params": { + "url": { + "goldenId": { + "path": "transforms.goldenId" + } + } + } + } + }, + "lookup": { + "id": "jempiSearchGoldenId", + "config": { + "method": "get", + "url": "http://jempi-api:50000/JeMPI/expanded-interactions-csv", + "params": { + "query": { + "uidList": { + "path": "urlParams.patientId" + } + } + } + } + } + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/register-response.json b/client-registry-jempi/importer/mapping-mediator/register-response.json new file mode 100644 index 00000000..be9d87a1 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/register-response.json @@ -0,0 +1,44 @@ +{ + "name": "Register Patient response", + "endpoint": { + "pattern": "/register-response", + "method": "POST" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputTransforms": { + "currentDate": "$now()", + "sourcePatient": "$exists(requestBody.identifier) and $exists(requestBody.identifier[0]) and $exists(requestBody.identifier[0].value) ? {'patient': requestBody.identifier[0].value, 'facility': requestBody.identifier[0].system} : null", + "nationalId": "$exists(requestBody.identifier) and $exists(requestBody.identifier[0]) and $count($filter(requestBody.identifier, function($v) {$contains($v.system, 'NationalID')})) > 0 ? $filter(requestBody.identifier, function($v) {$contains($v.system, 'NationalID')})[0].value : null", + "name": "$exists(requestBody.name) and $exists(requestBody.name[0]) and $exists(requestBody.name[0].given[0]) ? 
requestBody.name[0].given[0] : null", + "familyName": "$exists(requestBody.name) and $exists(requestBody.name[0]) ? requestBody.name[0].family : null", + "city": "$exists(requestBody.address) and $exists(requestBody.address[0]) ? requestBody.address[0].city : null", + "phoneNumber": "$exists(requestBody.telecom) and $exists(requestBody.telecom[0]) ? requestBody.telecom[0].value : null" + }, + "inputMapping": { + "transforms.sourcePatient": "sourceId", + "transforms.nationalId": "demographicData.nationalId", + "transforms.familyName": "demographicData.familyName", + "transforms.name": "demographicData.givenName", + "requestBody.gender": "demographicData.gender", + "requestBody.birthDate": "demographicData.dob", + "transforms.city": "demographicData.city", + "transforms.phoneNumber": "demographicData.phoneNumber", + "transforms.currentDate": "uniqueInteractionData.auxDateCreated" + }, + "requests": { + "response": { + "id": "jempi", + "primary": true, + "config": { + "method": "post", + "headers": { + "contentType": "application/fhir+json" + }, + "url": "http://jempi-api:50000/JeMPI/link-interaction" + } + } + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/register.json b/client-registry-jempi/importer/mapping-mediator/register.json new file mode 100644 index 00000000..8baa43a9 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/register.json @@ -0,0 +1,55 @@ +{ + "name": "Register Patient", + "endpoint": { + "pattern": "/fhir/Patient", + "method": "POST" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "constants": { + "active": true, + "linkType": "refer" + }, + "inputTransforms": { + "id": "lookupRequests.jempiRegister.data.linkInfo.interactionUID", + "goldenId": "$exists(lookupRequests.jempiRegister.data.linkInfo.goldenUID) ? 
{'reference': 'Patient/' & lookupRequests.jempiRegister.data.linkInfo.goldenUID} : null" + }, + "inputMapping": { + "requestBody.resourceType": "resourceType", + "transforms.id": "id", + "requestBody.identifier": "identifier", + "constants.active": "active", + "requestBody.name": "name", + "requestBody.gender": "gender", + "requestBody.birthDate": "birthDate", + "requestBody.address": "address", + "requestBody.telecom": "telecom", + "transforms.goldenId": "link[].other", + "constants.linkType": "link[].type" + }, + "requests": { + "lookup": [ + { + "id": "jempiRegister", + "forwardExistingRequestBody": true, + "config": { + "method": "post", + "headers": { + "contentType": "application/fhir+json" + }, + "url": "http://openhim-mapping-mediator:3003/register-response", + "params": { + "query": { + "candidateThreshold": { + "path": "query.candidateThreshold" + } + } + } + }, + "fhirResponse": true + } + ] + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/search.json b/client-registry-jempi/importer/mapping-mediator/search.json new file mode 100644 index 00000000..023afc39 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/search.json @@ -0,0 +1,59 @@ +{ + "name": "Search Patient by id", + "endpoint": { + "pattern": "/fhir/Patient/:patientId", + "method": "GET" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputTransforms" : { + "id": "lookupRequests.jempiSearch.data.goldenRecord.uid", + "gender": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.gender) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.gender : null", + "birthDate": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.dob) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.dob : null", + "name": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.givenName) ? 
lookupRequests.jempiSearch.data.goldenRecord.demographicData.givenName : null", + "lastName": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.familyName) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.familyName : null", + "city": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.city) ? lookupRequests.jempiSearch.data.goldenRecord.demographicData.city : null", + "identifierValue": "$exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId) and $exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId.patient) ? lookupRequests.jempiSearch.data.goldenRecord.sourceId.patient : null", + "identifierSystem": "$exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId) and $exists(lookupRequests.jempiSearch.data.goldenRecord.sourceId.facility) ? lookupRequests.jempiSearch.data.goldenRecord.sourceId.facility : null", + "nationalId": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.nationalId) ? {'value': lookupRequests.jempiSearch.data.goldenRecord.demographicData.nationalId, 'system': constants.nidSystem} : null", + "phoneNumber": "$exists(lookupRequests.jempiSearch.data.goldenRecord.demographicData.phoneNumber) ? 
{'value': lookupRequests.jempiSearch.data.goldenRecord.demographicData.phoneNumber, 'system': constants.phone} : null", + "link": "$append([], $map(lookupRequests.jempiSearch.data.interactionsWithScore, function($v) {{'other': {'reference': 'Patient/' & $v.interaction.uid}, 'type': 'refer'}}))" + }, + "inputMapping": { + "constants.resourceType": "resourceType", + "transforms.id": "id", + "transforms.gender": "gender", + "transforms.birthDate": "birthDate", + "transforms.lastName": "name[0].family", + "transforms.name": "name[0].given[0]", + "transforms.city": "address[0].city", + "transforms.identifierSystem": "identifier[0].system", + "transforms.identifierValue": "identifier[0].value", + "transforms.nationalId": "identifier[]+", + "transforms.phoneNumber": "telecom[]+", + "transforms.link": "link" + }, + "constants": { + "nidSystem": "NationalID", + "resourceType": "Patient", + "phone": "phone" + }, + "requests": { + "lookup": { + "id": "jempiSearch", + "config": { + "method": "get", + "url": "http://jempi-api:50000/JeMPI/expanded-golden-record/:patientId", + "params": { + "url": { + "patientId": { + "path": "urlParams.patientId" + } + } + } + } + } + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/searchAll.json b/client-registry-jempi/importer/mapping-mediator/searchAll.json new file mode 100644 index 00000000..284d56b6 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/searchAll.json @@ -0,0 +1,63 @@ +{ + "name": "Search Endpoint deterministic", + "endpoint": { + "pattern": "/fhir/Patient", + "method": "GET" + }, + "transformation": { + "output": "JSON" + }, + "inputTransforms": { + "total": "$count(lookupRequests.jempiSearchAll.data.goldenRecords)", + "entry": "$map(lookupRequests.jempiSearchAll.data.goldenRecords, function($v) {{'fullUrl': 'Patient/' & $v.goldenId, 'resource': {'resourceType': 'Patient','id': $v.goldenId,'name': {'given': [$v.demographicData.givenName],'family': $v.demographicData.familyName},'address': 
[{'city': $v.demographicData.city}],'birthDate': $v.demographicData.dob,'telecom': [{'value': $v.demographicData.phoneNumber,'system': 'phone'}],'identifier': [{'system': $v.sourceId.facility,'value': $v.sourceId.patient},{'system': 'NationalID','value': $v.demographicData.nationalId}],'gender': $v.demographicData.gender}}})" + }, + "inputMapping": { + "constants.resourceType": "resourceType", + "constants.type": "type", + "transforms.total": "total", + "transforms.entry": "entry" + }, + "constants": { + "resourceType": "Bundle", + "type": "searchset" + }, + "requests": { + "lookup": [ + { + "id": "jempiSearchAll", + "config": { + "method": "get", + "headers": { + "contentType": "application/json" + }, + "params": { + "query": { + "family": { + "path": "query.family" + }, + "given": { + "path": "query.given" + }, + "telecom": { + "path": "query.telecom" + }, + "identifier": { + "path": "query.identifier" + }, + "gender": { + "path": "query.gender" + }, + "birthDate": { + "path": "query.birthDate" + }, + "address": { + "path": "query.address" + } + } + }, + "url": "http://openhim-mapping-mediator:3003/search-response" + } + } + ] + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/searchAllProbabilistic.json b/client-registry-jempi/importer/mapping-mediator/searchAllProbabilistic.json new file mode 100644 index 00000000..e1287e2e --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/searchAllProbabilistic.json @@ -0,0 +1,41 @@ +{ + "name": "Search Endpoint probabilistic", + "endpoint": { + "pattern": "/fhir/Patient/$match", + "method": "POST" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "constants": { + "resourceType": "Bundle", + "type": "searchset" + }, + "inputTransforms": { + "total": "$count(lookupRequests.jempiSearchAllProbabilistic.data.goldenRecords)", + "entry": "$map(lookupRequests.jempiSearchAllProbabilistic.data.goldenRecords, function($v) {{'fullUrl': 'Patient/' & $v.goldenId, 'resource': 
{'resourceType': 'Patient','id': $v.goldenId,'name': {'given': [$v.demographicData.givenName],'family': $v.demographicData.familyName},'address': [{'city': $v.demographicData.city}],'birthDate': $v.demographicData.dob,'telecom': [{'value': $v.demographicData.phoneNumber,'system': 'phone'}],'identifier': [{'system': $v.sourceId.facility,'value': $v.sourceId.patient},{'system': 'NationalID','value': $v.demographicData.nationalId}],'gender': $v.demographicData.gender}}})" + }, + "inputMapping": { + "constants.resourceType": "resourceType", + "constants.type": "type", + "transforms.total": "total", + "transforms.entry": "entry" + }, + "requests": { + "lookup": [ + { + "id": "jempiSearchAllProbabilistic", + "forwardExistingRequestBody": true, + "config": { + "method": "post", + "headers": { + "contentType": "application/json" + }, + "url": "http://openhim-mapping-mediator:3003/search-response-probabilistic" + }, + "fhirResponse": true + } + ] + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/searchAllProbabilisticResponse.json b/client-registry-jempi/importer/mapping-mediator/searchAllProbabilisticResponse.json new file mode 100644 index 00000000..3b52c67d --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/searchAllProbabilisticResponse.json @@ -0,0 +1,74 @@ +{ + "name": "Search Response Endpoint deterministic", + "endpoint": { + "pattern": "/search-response-probabilistic", + "method": "POST" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputValidation": { + "type": "object", + "properties": { + "requestBody": { + "type": "object", + "properties": { + "parameter": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "valueString": { + "type": "string" + }, + "part": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "valueString": { + "type": "string" + }, + "valueInteger": { + "type": "number" 
+ } + }, + "required": ["name"] + } + } + }, + "required": ["name", "valueString", "part"] + }, + "minItems": 1 + } + }, + "required": ["parameter"] + } + } + }, + "inputTransforms": { + "operands": "$append([], $map(requestBody.parameter, function($v) {{'operator': $single($v.part, function($v) {$v.name = 'operator'}).valueString, 'operand': {'fn': $single($v.part, function($v) {$v.name = 'fn'}).valueString, 'distance': $single($v.part, function($v) {$v.name = 'distance'}).valueInteger, 'name': $v.name, 'value': $v.valueString}}}))" + }, + "inputMapping": { + "transforms.operands": "operands", + "transforms.operands[0].operand": "operand" + }, + "requests": { + "response": [ + { + "id": "jempiSearchAllProbabilisticResponse", + "config": { + "method": "post", + "url": "http://jempi-api:50000/JeMPI/cr-find" + } + } + ] + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/searchAllResponse.json b/client-registry-jempi/importer/mapping-mediator/searchAllResponse.json new file mode 100644 index 00000000..59b3bece --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/searchAllResponse.json @@ -0,0 +1,29 @@ +{ + "name": "Search Endpoint deterministic response", + "endpoint": { + "pattern": "/search-response", + "method": "GET" + }, + "transformation": { + "output": "JSON" + }, + "inputTransforms": { + "operands": "($transform := function($v) {[query.family ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'familyName', 'value': query.family}},query.given ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'givenName', 'value': query.given}},query.gender ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'gender', 'value': query.gender}},query.birthDate ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'dob', 'value': query.birthDate}},query.address ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'city', 'value': query.address}},query.telecom ? 
{'operator': 'and', 'operand': {'fn': 'eq', 'name': 'phoneNumber', 'value': query.telecom}},query.identifier ? {'operator': 'and', 'operand': {'fn': 'eq', 'name': 'nationalId', 'value': query.identifier}}]};$transform(query))" + }, + "inputMapping": { + "transforms.operands": "operands", + "transforms.operands[0].operand": "operand" + }, + "requests": { + "response": [ + { + "id": "jempiSearchAllResponse", + "forwardExistingRequestBody": true, + "config": { + "method": "post", + "url": "http://jempi-api:50000/JeMPI/cr-find" + } + } + ] + } +} diff --git a/client-registry-jempi/importer/mapping-mediator/update-response.json b/client-registry-jempi/importer/mapping-mediator/update-response.json new file mode 100644 index 00000000..dba06cba --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/update-response.json @@ -0,0 +1,43 @@ +{ + "name": "Update Patient Response", + "endpoint": { + "pattern": "/update-patient-response/:patientId", + "method": "POST" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputTransforms": { + "familyName": "$exists(requestBody.name) and $exists(requestBody.name[0]) and $exists(requestBody.name[0].family) ? {'name': 'familyName', 'value': requestBody.name[0].family} : null", + "givenName": "$exists(requestBody.name) and $exists(requestBody.name[0]) and $exists(requestBody.name[0].given) and $exists(requestBody.name[0].given[0]) ? {'name': 'givenName', 'value': requestBody.name[0].given[0]} : null", + "birthDate": "$exists(requestBody.birthDate) ? {'name': 'dob', 'value': requestBody.birthDate} : null", + "gender": "$exists(requestBody.gender) ? {'name': 'gender', 'value': requestBody.gender} : null", + "city": "$exists(requestBody.address) and $exists(requestBody.address[0]) and $exists(requestBody.address[0].city) ? 
{'name': 'city', 'value': requestBody.address[0].city} : null", + "phoneNumber": "$exists(requestBody.telecom) and $exists(requestBody.telecom[0]) and $exists(requestBody.telecom[0].value) ? {'name': 'phoneNumber', 'value': requestBody.telecom[0].value} : null", + "nationalId": "$exists(requestBody.identifier) and $exists(requestBody.identifier[0]) and $count($filter(requestBody.identifier, function($v) {$contains($v.system, 'NationalID')})) > 0 ? {'name': 'nationalId', 'value': $filter(requestBody.identifier, function($v) {$contains($v.system, 'NationalID')})[0].value} : null" + }, + "inputMapping": { + "urlParams.patientId": "goldenId", + "transforms.familyName": "fields[]+", + "transforms.givenName": "fields[]+", + "transforms.birthDate": "fields[]+", + "transforms.gender": "fields[]+", + "transforms.city": "fields[]+", + "transforms.phoneNumber": "fields[]+", + "transforms.nationalId": "fields[]+" + }, + "requests": { + "response": { + "id": "jempiUpdateResponse", + "primary": true, + "config": { + "method": "patch", + "headers": { + "contentType": "application/json" + }, + "url": "http://jempi-api:50000/JeMPI/cr-update-fields" + } + } + } + } diff --git a/client-registry-jempi/importer/mapping-mediator/update.json b/client-registry-jempi/importer/mapping-mediator/update.json new file mode 100644 index 00000000..5336d3d7 --- /dev/null +++ b/client-registry-jempi/importer/mapping-mediator/update.json @@ -0,0 +1,44 @@ +{ + "name": "Update Patient", + "endpoint": { + "pattern": "/fhir/Patient/:patientId", + "method": "PUT" + }, + "transformation": { + "input": "JSON", + "output": "JSON" + }, + "inputMapping": { + "urlParams.patientId": "id", + "requestBody.resourceType": "resourceType", + "requestBody.identifier": "identifier", + "requestBody.name": "name", + "requestBody.gender": "gender", + "requestBody.birthDate": "birthDate", + "requestBody.address": "address", + "requestBody.telecom": "telecom" + }, + "requests": { + "lookup": [ + { + "id": "jempiUpdate", + 
"forwardExistingRequestBody": true, + "config": { + "method": "post", + "params": { + "url": { + "patientId": { + "path": "urlParams.patientId" + } + } + }, + "headers": { + "contentType": "application/json" + }, + "url": "http://openhim-mapping-mediator:3003/update-patient-response/:patientId" + }, + "fhirResponse": true + } + ] + } + } diff --git a/client-registry-jempi/importer/openhim/openhim-import.json b/client-registry-jempi/importer/openhim/openhim-import.json index fcde3678..7e49269b 100644 --- a/client-registry-jempi/importer/openhim/openhim-import.json +++ b/client-registry-jempi/importer/openhim/openhim-import.json @@ -1,174 +1,67 @@ { "Channels": [ { + "name": "JeMPI Patient endpoints in fhir", + "description": "JeMPI Patient endpoints in fhir format", + "urlPattern": "^/fhir/Patient/?[^/]*$", "methods": [ - "POST" + "GET", + "POST", + "DELETE", + "PUT", + "OPTIONS", + "HEAD", + "TRACE", + "CONNECT", + "PATCH" ], "type": "http", - "allow": [ - "instant" - ], - "whitelist": [], - "authType": "private", - "matchContentTypes": [], - "properties": [], - "txViewAcl": [], - "txViewFullAcl": [], - "txRerunAcl": [], - "status": "enabled", - "rewriteUrls": false, - "addAutoRewriteRules": true, - "autoRetryEnabled": false, - "autoRetryPeriodMinutes": 60, - "routes": [ - { - "type": "http", - "status": "enabled", - "forwardAuthHeader": false, - "name": "JeMPI Sync Receiver", - "secured": false, - "host": "jempi-sync-receiver", - "port": 50000, - "path": "/fhir", - "pathTransform": "", - "primary": true, - "username": "", - "password": "" - } - ], + "tcpPort": null, + "tcpHost": null, + "pollingSchedule": null, "requestBody": true, "responseBody": true, - "rewriteUrlsConfig": [], - "name": "JeMPI Sync Receiver", - "urlPattern": "^/jempi/fhir.*$", - "matchContentRegex": null, - "matchContentXpath": null, - "matchContentValue": null, - "matchContentJson": null, - "pollingSchedule": null, - "tcpHost": null, - "tcpPort": null, - "updatedBy": { - "id": 
"638a089bed7a51001325406f", - "name": "Super User" - }, - "alerts": [] - }, - { - "methods": [ - "POST" - ], - "type": "http", - "allow": [ - "instant" - ], + "allow": ["instant"], "whitelist": [], "authType": "private", - "matchContentTypes": [], - "properties": [], - "txViewAcl": [], - "txViewFullAcl": [], - "txRerunAcl": [], - "status": "enabled", - "rewriteUrls": false, - "addAutoRewriteRules": true, - "autoRetryEnabled": false, - "autoRetryPeriodMinutes": 60, "routes": [ { + "name": "Generic Mapping Mediator", "type": "http", "status": "enabled", - "forwardAuthHeader": false, - "name": "JeMPI Async Receiver", "secured": false, - "host": "jempi-async-receiver", - "port": 50000, - "path": "/fhir", + "host": "openhim-mapping-mediator", + "port": 3003, + "path": "", "pathTransform": "", "primary": true, "username": "", - "password": "" + "password": "", + "forwardAuthHeader": true, + "waitPrimaryResponse": false, + "statusCodesCheck": "2**" } ], - "requestBody": true, - "responseBody": true, - "rewriteUrlsConfig": [], - "name": "JeMPI Async Receiver", - "urlPattern": "^/jempi/async/fhir.*$", + "matchContentTypes": [], "matchContentRegex": null, "matchContentXpath": null, - "matchContentValue": null, "matchContentJson": null, - "pollingSchedule": null, - "tcpHost": null, - "tcpPort": null, - "updatedBy": { - "id": "638a089bed7a51001325406f", - "name": "Super User" - }, - "alerts": [] - }, - { - "methods": [ - "GET", - "POST", - "DELETE", - "PUT", - "OPTIONS", - "HEAD", - "TRACE", - "CONNECT", - "PATCH" - ], - "type": "http", - "allow": [ - "instant" - ], - "whitelist": [], - "authType": "private", - "matchContentTypes": [], + "matchContentValue": null, "properties": [], "txViewAcl": [], "txViewFullAcl": [], "txRerunAcl": [], + "alerts": [], "status": "enabled", "rewriteUrls": false, "addAutoRewriteRules": true, + "rewriteUrlsConfig": [], "autoRetryEnabled": false, "autoRetryPeriodMinutes": 60, - "routes": [ - { - "type": "http", - "status": "enabled", - 
"forwardAuthHeader": false, - "name": "JeMPI", - "secured": false, - "host": "jempi-api", - "port": 50000, - "path": "", - "pathTransform": "s/jempi\\/api\\///g", - "primary": true, - "username": "", - "password": "" - } - ], - "requestBody": true, - "responseBody": true, - "rewriteUrlsConfig": [], - "name": "JeMPI API", - "urlPattern": "^/jempi/api/.*$", - "matchContentRegex": null, - "matchContentXpath": null, - "matchContentValue": null, - "matchContentJson": null, - "pollingSchedule": null, - "tcpHost": null, - "tcpPort": null, "updatedBy": { - "id": "638a089bed7a51001325406f", + "id": "6527e7676dec203bde9f2aeb", "name": "Super User" - }, - "alerts": [] + } } ] } diff --git a/client-registry-jempi/package-metadata.json b/client-registry-jempi/package-metadata.json index 2a21dcec..6b0f40de 100644 --- a/client-registry-jempi/package-metadata.json +++ b/client-registry-jempi/package-metadata.json @@ -5,7 +5,7 @@ "type": "infrastructure", "version": "0.0.1", "dependencies": [ - "interoperability-layer-openhim", + "openhim-mapping-mediator", "message-bus-kafka", "identity-access-manager-keycloak" ], @@ -23,42 +23,37 @@ "JEMPI_RATEL_MEMORY_RESERVE": "500M", "JEMPI_ASYNC_RECEIVER_MEMORY_LIMIT": "3G", "JEMPI_ASYNC_RECEIVER_MEMORY_RESERVE": "500M", - "JEMPI_EM_CALCULATOR_MEMORY_LIMIT": "3G", - "JEMPI_EM_CALCULATOR_MEMORY_RESERVE": "500M", "JEMPI_CONTROLLER_MEMORY_LIMIT": "3G", "JEMPI_CONTROLLER_MEMORY_RESERVE": "500M", "JEMPI_LINKER_MEMORY_LIMIT": "3G", "JEMPI_LINKER_MEMORY_RESERVE": "500M", - "JEMPI_PRE_PROCESSOR_MEMORY_LIMIT": "3G", - "JEMPI_PRE_PROCESSOR_MEMORY_RESERVE": "500M", - "JEMPI_SYNC_RECEIVER_MEMORY_LIMIT": "3G", - "JEMPI_SYNC_RECEIVER_MEMORY_RESERVE": "500M", "JEMPI_API_MEMORY_LIMIT": "3G", "JEMPI_API_MEMORY_RESERVE": "500M", + "JEMPI_ETL_MEMORY_LIMIT": "3G", + "JEMPI_ETL_MEMORY_RESERVE": "500M", "JEMPI_API_INSTANCES": 1, - "JEMPI_KAFKA_TOPICS": 
"JeMPI-async-preprocessor,JeMPI-patient-controller,JeMPI-patient-em,JeMPI-patient-linker,JeMPI-mu-linker,JeMPI-notifications", - "JEMPI_ASYNC_RECEIVER_IMAGE_TAG": "0.2.0", - "JEMPI_SYNC_RECEIVER_IMAGE_TAG": "0.2.0", - "JEMPI_PRE_PROCESSOR_IMAGE_TAG": "0.2.0", - "JEMPI_CONTROLLER_IMAGE_TAG": "0.2.0", - "JEMPI_EM_CALCULATOR_IMAGE_TAG": "0.2.0", - "JEMPI_LINKER_IMAGE_TAG": "0.2.0", - "JEMPI_API_IMAGE_TAG": "0.2.0", + "JEMPI_KAFKA_TOPICS": "JeMPI-async-etl,JeMPI-interaction-controller,JeMPI-interaction-em,JeMPI-interaction-linker,JeMPI-mu-linker,JeMPI-audit-trail,JeMPI-notifications", + "JEMPI_ASYNC_RECEIVER_IMAGE_TAG": "test-recipes", + "JEMPI_CONTROLLER_IMAGE_TAG": "test-recipes", + "JEMPI_LINKER_IMAGE_TAG": "test-recipes", + "JEMPI_API_IMAGE_TAG": "test-recipes", + "JEMPI_API_KC_IMAGE_TAG": "test-recipes", + "JEMPI_ETL_IMAGE_TAG": "test-recipes", "JEMPI_ASYNC_RECEIVER_INSTANCES": 1, - "JEMPI_SYNC_RECEIVER_INSTANCES": 1, - "JEMPI_PRE_PROCESSOR_INSTANCES": 1, "JEMPI_CONTROLLER_INSTANCES": 1, - "JEMPI_EM_CALCULATOR_INSTANCES": 1, "JEMPI_LINKER_INSTANCES": 1, + "JEMPI_ETL_INSTANCES": 1, "JEMPI_OPENHIM_PASSWORD": "instant101", "JEMPI_SESSION_SECRET": "c05ll3lesrinf39t7mc5h6un6r0c69lgfno69dsak3vabeqamouq4328cuaekros401ajdpkh60rrt", "JEMPI_FILE_IMPORT_MAX_SIZE_BYTE": 128000000, - "REACT_APP_JEMPI_BASE_URL": "http://localhost:50000/JeMPI", + "REACT_APP_JEMPI_BASE_API_HOST": "http://localhost", + "REACT_APP_JEMPI_BASE_API_PORT": "50000", "REACT_APP_MOCK_BACKEND": "false", - "JEMPI_WEB_VERSION": "latest", + "JEMPI_WEB_VERSION": "test-recipes", "JEMPI_WEB_INSTANCES": 1, "JEMPI_WEB_MEMORY_LIMIT": "2G", "JEMPI_WEB_MEMORY_RESERVE": "500M", + "KC_JEMPI_SSO_ENABLED": "false", "KC_FRONTEND_URL": "http://localhost:9088", "KC_REALM_NAME": "platform-realm", "KC_API_URL": "http://identity-access-manager-keycloak:8080", @@ -75,6 +70,21 @@ "JEMPI_POSTGRES_MEMORY_LIMIT": "3G", "JEMPI_POSTGRES_MEMORY_RESERVE": "500M", "JEMPI_SESSION_SECURE": false, - "DOMAIN_NAME": "" + 
"JEMPI_SESSION_DOMAIN_NAME": "localhost", + "DOMAIN_NAME": "", + "POSTGRESQL_PASSWORD": "postgres", + "KAFKA_APPLICATION_ID_API": "api-app-id", + "DGRAPH_HOSTS": "jempi-alpha-01,jempi-alpha-02,jempi-alpha-03", + "DGRAPH_PORTS": "9080,9081,9082", + "LOG4J2_LEVEL": "DEBUG", + "KAFKA_APPLICATION_ID_INTERACTIONS": "app-id-link1", + "KAFKA_APPLICATION_ID_MU": "app-id-link2", + "KAFKA_CLIENT_ID_NOTIFICATIONS": "app-id-link3", + "LINKER_MATCH_THRESHOLD": 0.65, + "LINKER_MATCH_THRESHOLD_MARGIN": 0.1, + "KAFKA_CLIENT_ID_ASYNC": "client-id-syncrx", + "KAFKA_APPLICATION_ID_ETL": "app-id-etl", + "KAFKA_APPLICATION_ID_CTRL": "app-id-ctrl", + "KAFKA_CLIENT_ID_CTRL": "client-id-ctrl" } } diff --git a/client-registry-jempi/swarm.sh b/client-registry-jempi/swarm.sh index 5ef5ce66..cb556585 100644 --- a/client-registry-jempi/swarm.sh +++ b/client-registry-jempi/swarm.sh @@ -62,6 +62,9 @@ function initialize_package() { log info "Importing JeMPI Kafka topics" docker::deploy_config_importer $STACK "$COMPOSE_FILE_PATH/importer/docker-compose.config.yml" "jempi-kafka-config-importer" "jempi-kafka" + log info "Importing mapping endpoints" + docker::deploy_config_importer $STACK "$COMPOSE_FILE_PATH/importer/mapping-mediator/docker-compose.config.yml" "mapping-mediator-config-importer" "jempi" + log info "Deploy Dgraph" docker::deploy_service $STACK "${COMPOSE_FILE_PATH}" "docker-compose.dgraph-zero.yml" "$dgraph_zero_dev_compose_param" "$dgraph_zero_cluster_compose_param" diff --git a/config.yaml b/config.yaml index 04c40d90..c548503f 100644 --- a/config.yaml +++ b/config.yaml @@ -1,5 +1,5 @@ projectName: platform -image: jembi/platform:2.5.0 +image: jembi/platform:latest logPath: /tmp/logs packages: @@ -23,3 +23,52 @@ packages: - client-registry-jempi - identity-access-manager-keycloak - openhim-mapping-mediator + +profiles: + - name: cdr-dw + packages: + - interoperability-layer-openhim + - reverse-proxy-nginx + - fhir-datastore-hapi-fhir + - message-bus-kafka + - job-scheduler-ofelia + 
- kafka-mapper-consumer + - kafka-unbundler-consumer + - analytics-datastore-clickhouse + - dashboard-visualiser-superset + - monitoring + - mpi-mediator + - client-registry-jempi + - identity-access-manager-keycloak + - openhim-mapping-mediator + envFiles: + - cdr-dw.env + + - name: cdr + packages: + - interoperability-layer-openhim + - reverse-proxy-nginx + - fhir-datastore-hapi-fhir + - message-bus-kafka + - job-scheduler-ofelia + - kafka-unbundler-consumer + - monitoring + - mpi-mediator + - client-registry-jempi + - identity-access-manager-keycloak + - openhim-mapping-mediator + envFiles: + - cdr.env + + - name: mpi + packages: + - interoperability-layer-openhim + - reverse-proxy-nginx + - message-bus-kafka + - job-scheduler-ofelia + - monitoring + - client-registry-jempi + - identity-access-manager-keycloak + - openhim-mapping-mediator + envFiles: + - mpi.env diff --git a/dashboard-visualiser-superset/importer/config/superset-export.zip b/dashboard-visualiser-superset/importer/config/superset-export.zip index 166fefae..e0a6b81e 100644 Binary files a/dashboard-visualiser-superset/importer/config/superset-export.zip and b/dashboard-visualiser-superset/importer/config/superset-export.zip differ diff --git a/docs/packages/dashboard-visualiser-js-report/environment-variables.md b/docs/packages/dashboard-visualiser-js-report/environment-variables.md index 3b2afe94..ea41d076 100644 --- a/docs/packages/dashboard-visualiser-js-report/environment-variables.md +++ b/docs/packages/dashboard-visualiser-js-report/environment-variables.md @@ -6,21 +6,4 @@ description: Listed in this page are all environment variables needed to run Jsr -| Variable Name | Type | Relevance | Required | Default | -| --------------------------- | ------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------------- | --------------------------------------- | -| JS\_REPORT\_LICENSE\_KEY | String | Service 
license key | Yes | | -| JS\_REPORT | String | Jsreport service password | No | dev\_password\_only | -| JS\_REPORT\_USERNAME | String | Jsreport service username | No | admin | -| JS\_REPORT\_SECRET | String | Secret password for the authentication of a cookie session related to the extension used in Jsreport | No | dev\_secret\_only | -| ES\_HOSTS | String | Elasticsearch connection string | No | analytics-datastore-elastic-search:9200 | -| ES\_PASSWORD | String | Elasticsearch password (for request authentication) | No | dev\_password\_only | -| ES\_USERNAME | String | Elasticsearch username (for request authentication | No | elastic | -| JS\_REPORT\_INSTANCES | Number | Number of service replicas | No | 1 | -| JS\_REPORT\_SSL | Boolean | SSL protocol requirement | No | false | -| JS\_REPORT\_CONFIG\_FILE | String | Path to the service import file | No | export.jsrexport | -| JS\_REPORT\_DEV\_MOUNT | Boolean | Dev mount mode enabling flag | No | false | -| JS\_REPORT\_PACKAGE\_PATH | String | Local path to package | Yes if `JS_REPORT_DEV_MOUNT` is set to true | | -| JS\_REPORT\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| JS\_REPORT\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| JS\_REPORT\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| JS\_REPORT\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
JS_REPORT_LICENSE_KEYStringService license keyYes
JS_REPORTStringJsreport service passwordNodev_password_only
JS_REPORT_USERNAMEStringJsreport service usernameNoadmin
JS_REPORT_SECRETStringSecret password for the authentication of a cookie session related to the extension used in JsreportNodev_secret_only
ES_HOSTSStringElasticsearch connection stringNoanalytics-datastore-elastic-search:9200
ES_PASSWORDStringElasticsearch password (for request authentication)Nodev_password_only
ES_USERNAMEStringElasticsearch username (for request authentication)Noelastic
JS_REPORT_INSTANCESNumberNumber of service replicasNo1
JS_REPORT_SSLBooleanSSL protocol requirementNofalse
JS_REPORT_CONFIG_FILEStringPath to the service import fileNoexport.jsrexport
JS_REPORT_DEV_MOUNTBooleanDev mount mode enabling flagNofalse
JS_REPORT_PACKAGE_PATHStringLocal path to packageYes if JS_REPORT_DEV_MOUNT is set to true
JS_REPORT_CPU_LIMITNumberCPU usage limitNo0
JS_REPORT_MEMORY_LIMITStringRAM usage limitNo3G
JS_REPORT_CPU_RESERVENumberReserved CPUNo0.05
JS_REPORT_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/docs/packages/dashboard-visualiser-kibana/environment-variables.md b/docs/packages/dashboard-visualiser-kibana/environment-variables.md index 3220bde7..bbdba3e9 100644 --- a/docs/packages/dashboard-visualiser-kibana/environment-variables.md +++ b/docs/packages/dashboard-visualiser-kibana/environment-variables.md @@ -4,16 +4,4 @@ description: Listed in this page are all environment variables needed to run Kib # Environment Variables - - -| Variable Name | Type | Relevance | Required | Default | -| ------------------------- | ------- | -------------------------------------- | -------- | -------------------- | -| ES\_KIBANA_\__SYSTEM | String | ElasticSearch auth username | Yes | | -| KIBANA\_INSTANCES | Number | Number of service replicas | No | 1 | -| KIBANA\_YML_\__CONFIG | String | Path to the service configuration file | No | kibana-kibana.yml | -| KIBANA\_USERNAME | String | Service username | No | elastic | -| KIBANA\_PASSWORD | String | Service password | No | dev\_password\_only | -| KIBANA\_SSL | Boolean | SSL protocol requirement | No | True | -| KIBANA\_CONFIG_\__FILE | String | Path to the dashboard import file | No | kibana-export.ndjson | -| KIBANA\_MEMORY_\__LIMIT | String | RAM usage limit | No | 3G | -| KIBANA\_MEMORY_\__RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
ES_KIBANA_SYSTEMStringElasticSearch auth usernameYes
KIBANA_INSTANCESNumberNumber of service replicas No1
KIBANA_YML_CONFIGStringPath to the service configuration fileNokibana-kibana.yml
KIBANA_USERNAMEStringService usernameNoelastic
KIBANA_PASSWORDStringService passwordNodev_password_only
KIBANA_SSLBooleanSSL protocol requirementNoTrue
KIBANA_CONFIG_FILEStringPath to the dashboard import fileNokibana-export.ndjson
KIBANA_MEMORY_LIMITStringRAM usage limitNo3G
KIBANA_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/documentation/README (1).md b/documentation/README (1).md index 09fb4db4..ba4a3981 100644 --- a/documentation/README (1).md +++ b/documentation/README (1).md @@ -1,5 +1,5 @@ --- -description: What you need to start using Platform. +description: What you need to start using OpenHIM Platform. --- # Getting Started @@ -19,9 +19,18 @@ The following tools are needed to run/deploy platform: 1. Once Docker is installed initialise Docker Swarm: `docker swarm init` 2. Download the [Instant OpenHIE 2 binary](https://jembi.gitbook.io/instant-v2/getting-started). Once you are able to execute the instant executable, return here. -3. Create the logging directory using `mkdir -p /tmp/logs/` -4. Download the latest Jembi Platform config file which configures Instant OpenHIE 2 to use Jembi Platform packages: `wget https://raw.githubusercontent.com/jembi/platform/main/config.yaml` -5. Download the latest environment variable file, which sets configuration options for Jembi Platform packages: `wget https://raw.githubusercontent.com/jembi/platform/main/.env.local` -6. Launch some Jembi Platform packages, e.g. `./instant package init --name interoperability-layer-openhim --name message-bus-kafka --env-file .env.local --dev` This launches the OpenHIM and Kafka packages in dev mode (which exposes service ports for development purposes) using the config supplied in the env var file. +3. Go to the OpenHIM Platform [https://github.com/jembi/platform/releases/latest](https://github.com/jembi/platform/releases/latest) page and use the `<release version>` for the following steps. +4. Download the latest OpenHIM Platform config file which configures Instant OpenHIE v2 to use OpenHIM Platform packages: `wget https://raw.githubusercontent.com/jembi/platform/<release version>/config.yaml` -Next, you might want to browse the packages available in Jembi Platform. Each package's documentation lists the variables used to configure them. 
For more information on how to start stop and destroy packages using the command line, see the [Instant OpenHIE 2 CLI docs](https://jembi.gitbook.io/instant-v2/cli). +{% hint style="info" %} + e.g. `wget https://raw.githubusercontent.com/jembi/platform/2.5.0/config.yaml` +{% endhint %} + +5. Download the latest environment variable file, which sets configuration options for OpenHIM Platform packages: `wget https://raw.githubusercontent.com/jembi/platform/<release version>/.env.local` +6. Launch some OpenHIM Platform packages, e.g. `./instant package init --name interoperability-layer-openhim --name message-bus-kafka --env-file .env.local --dev` This launches the OpenHIM and Kafka packages in dev mode (which exposes service ports for development purposes) using the config supplied in the env var file. + +Next, you might want to browse the [recipes](recipes/) available in OpenHIM Platform. Each recipe bundles a set of packages and configuration to setup an HIE for a particular purpose. + +Alternatively you can also browse the individual set of [packages](packages/) that OpenHIM Platform offers. Each package's documentation lists the environment variables used to configure them. + +For more information on how to start stop and destroy packages using the command line, see the [Instant OpenHIE 2 CLI docs](https://jembi.gitbook.io/instant-v2/cli). diff --git a/documentation/README.md b/documentation/README.md index 7912e289..fefbead8 100644 --- a/documentation/README.md +++ b/documentation/README.md @@ -1,26 +1,36 @@ --- -description: What is the Jembi Platform and what can you use it for? 
+cover: >- + https://images.unsplash.com/photo-1639815189096-f75717eaecfe?crop=entropy&cs=srgb&fm=jpg&ixid=M3wxOTcwMjR8MHwxfHNlYXJjaHwzfHxjb25uZWN0aW5nJTIwYmxvY2tzJTIwZGlnaXRhbHxlbnwwfHx8fDE2OTg4MzAyNjl8MA&ixlib=rb-4.0.3&q=85 +coverY: 0 +layout: landing --- -# About +# OpenHIM Platform -Jembi platform is an easy was to setup, manage and operate a Health Information Exchange (HIE). Specifically it is the following: +{% embed url="https://youtu.be/37MvrolxHto" fullWidth="false" %} -* A toolbox of open-source tools, grouped into packages, that are used to in an HIE. -* The glue that ties these tools together. These are often in the form of OpenHIM mediators which are just microservices that talk to the OpenHIM. +OpenHIM platform is an easy way to set up, manage and operate a Health Information Exchange (HIE). Specifically, it is the following: + +* A toolbox of open-source tools, grouped into packages, that are used within an HIE. +* The glue that ties these tools together. These are often in the form of OpenHIM mediators which are just microservices that talk to OpenHIM. * A CLI tool to deploy and manage these packages. +{% content-ref url="README (1).md" %} +[README (1).md]() +{% endcontent-ref %} + ### The Problem -We at Jembi want to stop rebuilding solutions from near scratch each time we need an HIE implementation. It would beneficial to us and other doing the same work to focus more on the unique needs of a country rather than the intricacies of a production deployment of an HIE. +We at Jembi want to stop rebuilding solutions from near scratch each time we need an HIE implementation. It would be beneficial to us and others doing the same work to focus more on the unique needs of a country rather than the intricacies of a production deployment of an HIE. 
Operating production-grade HIE systems is hard, because of these issues: * Need to support up to national scale -* An always present need for high level of security -* Difficult of deploying complex system that have many components +* An always-present need for high level of security +* Difficulty of deploying complex systems that have many components * Considerations for high availability/fault tolerance -* Setting up monitoring all services within the HIE +* Setting up monitoring of all services within an HIE * Common HIE services require very specific knowledge, i.e.: * Patient matching * Efficient reporting @@ -28,10 +38,14 @@ Operating production-grade HIE systems is hard, because of these issues: ### The Solution -Jembi Platform provides an opinionated way to to deploy, secure and scale highly-available services for an HIE environment. It provides a set of services to solve common HIE challenges: +OpenHIM Platform provides an opinionated way to deploy, secure and scale highly-available services for an HIE environment. It provides a set of services to solve common HIE challenges: * Patient matching * FHIR support * Reporting services * Extensible for country needs * Deploying/Operating/Managing HIE services + +{% hint style="info" %} +OpenHIM Platform is powered by the [Instant OpenHIE deployment tool](https://jembi.gitbook.io/instant-v2/). 
+{% endhint %} diff --git a/documentation/SUMMARY.md b/documentation/SUMMARY.md index 35459246..ebc6a855 100644 --- a/documentation/SUMMARY.md +++ b/documentation/SUMMARY.md @@ -1,8 +1,12 @@ # Table of contents -* [About](README.md) +* [OpenHIM Platform](README.md) * [Getting Started]() -* [Packages](packages/README.md) +* [📜 Recipes](recipes/README.md) + * [Central Data Repository with Data Warehousing](recipes/central-data-repository-with-data-warehousing.md) + * [Central Data repository (no reporting)](recipes/central-data-repository-no-reporting.md) + * [Master Patient Index](recipes/master-patient-index.md) +* [📦 Packages](packages/README.md) * [Interoperability Layer Openhim](packages/interoperability-layer-openhim/README.md) * [Local Development](packages/interoperability-layer-openhim/local-development.md) * [Environment Variables](packages/interoperability-layer-openhim/environment-variables.md) @@ -48,13 +52,14 @@ * [Reverse Proxy Nginx](packages/reverse-proxy-nginx/README.md) * [Local Development](packages/reverse-proxy-nginx/local-development.md) * [Environment Variables](packages/reverse-proxy-nginx/environment-variables.md) -* [Development](development.md) -* [Provisioning up remote servers](provisioning-up-remote-servers/README.md) +* [Provisioning remote servers](provisioning-up-remote-servers/README.md) * [Ansible](provisioning-up-remote-servers/ansible.md) * [Terraform](provisioning-up-remote-servers/terraform.md) -* [Config Importing](config-importing.md) * [Resource Allocations](resource-allocations.md) +* [Development](development/README.md) + * [Config Importing](development/config-importing.md) * [Disaster Recovery Process](disaster-recovery-process/README.md) * [Elasticsearch](disaster-recovery-process/elasticsearch.md) * [HAPI FHIR Data](disaster-recovery-process/hapi-fhir-data.md) * [OpenHIM Data](disaster-recovery-process/openhim-data.md) +* [Community](community.md) diff --git a/documentation/community.md b/documentation/community.md new 
file mode 100644 index 00000000..87d2e102 --- /dev/null +++ b/documentation/community.md @@ -0,0 +1,5 @@ +# Community + +We encourage any contributions and suggestions! If you would like to get involved, please visit us on [Github](https://github.com/jembi/platform/). Feel free to submit an issue or to create a PR to see your features included in the project. + +We look forward to growing the set of capabilities within OpenHIM Platform together! diff --git a/documentation/development.md b/documentation/development/README.md similarity index 76% rename from documentation/development.md rename to documentation/development/README.md index f0a9a80d..3593d20b 100644 --- a/documentation/development.md +++ b/documentation/development/README.md @@ -2,7 +2,7 @@ ## Adding Packages -* The Go Cli runs all services from the `jembi/platform` docker image. When adding new packages or updating existing packages to Platform you will need to build/update your local `jembi/platform` image. [How to build the image](). -* As you add new packages to the platform remember to list them in the `config.yml` file - otherwise the added package will not be detected by the [platform-cli tool](http://localhost:5000/o/lTiMw1wKTVQEjepxV4ou/s/TwrbQZir3ZdvejunAFia/). +* The Go Cli runs all services from the `jembi/platform` docker image. When adding new packages or updating existing packages to Platform you will need to build/update your local `jembi/platform` image. [How to build the image](<../README (1).md>). +* As you add new packages to the platform remember to list them in the `config.yml` file - otherwise the added package will not be detected by the [platform-cli tool](http://127.0.0.1:5000/o/lTiMw1wKTVQEjepxV4ou/s/TwrbQZir3ZdvejunAFia/). 
## diff --git a/documentation/config-importing.md b/documentation/development/config-importing.md similarity index 100% rename from documentation/config-importing.md rename to documentation/development/config-importing.md diff --git a/documentation/packages/README.md b/documentation/packages/README.md index 94c5dfaf..6fbcac9b 100644 --- a/documentation/packages/README.md +++ b/documentation/packages/README.md @@ -1,9 +1,9 @@ --- description: >- - The Jembi Platform includes a number of base packages which are useful for + The OpenHIM Platform includes a number of base packages which are useful for supporting Health Information Exchanges Workflows. Each section below describes the details of these packages. --- -# Packages +# 📦 Packages diff --git a/documentation/packages/analytics-datastore-elasticsearch/environment-variables.md b/documentation/packages/analytics-datastore-elasticsearch/environment-variables.md index 8072cde5..9db1f011 100644 --- a/documentation/packages/analytics-datastore-elasticsearch/environment-variables.md +++ b/documentation/packages/analytics-datastore-elasticsearch/environment-variables.md @@ -6,17 +6,4 @@ description: >- # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ---------------------------- | ------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ---------------------------------- | -| ES\_ELASTIC | String | Elasticsearch super-user password | Yes | dev\_password\_only | -| ES\_KIBANA\_SYSTEM | String | The password for the user Kibana used to connect and communicate with Elasticsearch | Yes | dev\_password\_only | -| ES\_LOGSTASH\_SYSTEM | String | The password for the user Logstash used to map and transform the data before storing it in Elasticsearch | Yes | dev\_password\_only | -| 
ES\_BEATS\_SYSTEM | String | The password for the user the Beats use when storing monitoring information in Elasticsearch | Yes | dev\_password\_only | -| ES\_REMOTE\_MONITORING\_USER | String | The password for the user Metricbeat used when collecting and storing monitoring information in Elasticsearch. It has the remote\_monitoring\_agent and remote\_monitoring\_collector built-in roles | Yes | dev\_password\_only | -| ES\_APM\_SYSTEM | String | The password for the user of the APM server used when storing monitoring information in Elasticsearch | Yes | dev\_password\_only | -| ES\_LEADER\_NODE | String |

Specify the leader service name (the service name in case single mode and the leader service name in case cluster mode)

This is used for the config importer. Specifying the service name to initialize the mapping inside Elasticsearch

| Yes | analytics-datastore-elastic-search | -| ES\_HEAP\_SIZE | String |

The heap size is the amount of RAM allocated to the Java Virtual Machine of a node in Elasticsearch

It should be set -Xms and -Xmx to the same value (50% of the total available RAM to a maximum of 31GB)

| No | -Xms2048m -Xmx2048m | -| ES\_SSL | Boolean | This variable is used only for the config importer of Elasticsearch (internal connection between docker services the elastic and the importer) | No | false | -| ES\_MEMORY\_LIMIT | String | RAM usage limit of Elasticsearch service | No | 3G | -| ES\_MEMORY\_RESERVE | String | Reserved RAM for Elasticsearch service | No | 500M | -| ES\_PATH\_REPO | String | The path to the repository in the container to store Elasticsearch backup snapshots | No | /backups/elasticsearch | +
Variable NameTypeRelevanceRequiredDefault
ES_ELASTICStringElasticsearch super-user passwordYesdev_password_only
ES_KIBANA_SYSTEMStringThe password for the user Kibana used to connect and communicate with ElasticsearchYesdev_password_only
ES_LOGSTASH_SYSTEMStringThe password for the user Logstash used to map and transform the data before storing it in ElasticsearchYesdev_password_only
ES_BEATS_SYSTEMStringThe password for the user the Beats use when storing monitoring information in ElasticsearchYesdev_password_only
ES_REMOTE_MONITORING_USERStringThe password for the user Metricbeat used when collecting and storing monitoring information in Elasticsearch. It has the remote_monitoring_agent and remote_monitoring_collector built-in rolesYesdev_password_only
ES_APM_SYSTEMStringThe password for the user of the APM server used when storing monitoring information in ElasticsearchYesdev_password_only
ES_LEADER_NODEString

Specify the leader service name (the service name in case single mode and the leader service name in case cluster mode)

This is used for the config importer. Specifying the service name to initialize the mapping inside Elasticsearch

Yesanalytics-datastore-elastic-search
ES_HEAP_SIZEString

The heap size is the amount of RAM allocated to the Java Virtual Machine of a node in Elasticsearch

It should be set -Xms and -Xmx to the same value (50% of the total available RAM to a maximum of 31GB)

No-Xms2048m -Xmx2048m
ES_SSLBooleanThis variable is used only for the config importer of Elasticsearch (internal connection between docker services the elastic and the importer)Nofalse
ES_MEMORY_LIMITStringRAM usage limit of Elasticsearch serviceNo3G
ES_MEMORY_RESERVEStringReserved RAM for Elasticsearch serviceNo500M
ES_PATH_REPOStringThe path to the repository in the container to store Elasticsearch backup snapshotsNo/backups/elasticsearch
diff --git a/documentation/packages/client-registry-santempi/environment-variables.md b/documentation/packages/client-registry-santempi/environment-variables.md index dd3d60f1..3036629a 100644 --- a/documentation/packages/client-registry-santempi/environment-variables.md +++ b/documentation/packages/client-registry-santempi/environment-variables.md @@ -4,15 +4,7 @@ description: Listed in this page are all environment variables needed to run Kib # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ----------------------------------- | ------ | ------------------------------------------------- | -------- | ---------------------------------------------- | -| SANTEMPI\_INSTANCES | Number | Number of service replicas | No | 1 | -| SANTEMPI\_MAIN\_CONNECTION\_STRING | String | Connection string to SanteMPI | No | _Check below table_ | -| SANTEMPI\_AUDIT\_CONNECTION\_STRING | String | Audit connection string to SanteMPI | No | _Check below table_ | -| SANTEMPI\_POSTGRESQL\_PASSWORD | String | SanteMPI postgreSQL password | No | SanteDB123 | -| SANTEMPI\_POSTGRESQL\_USERNAME | String | SanteMPI postgreSQL username | No | santempi | -| SANTEMPI\_REPMGR\_PRIMARY\_HOST | String | SanteMPI postgreSQL replicas manager primary host | No | santempi-psql-1 | -| SANTEMPI\_REPMGR\_PARTNER\_NODES | String | SanteMPI postgreSQL replicas manager nodes hosts | Yes | santempi-psql-1,santempi-psql-2,santempi-psql- | +
Variable NameTypeRelevanceRequiredDefault
SANTEMPI_INSTANCESNumberNumber of service replicas No1
SANTEMPI_MAIN_CONNECTION_STRINGStringConnection string to SanteMPINoCheck below table
SANTEMPI_AUDIT_CONNECTION_STRINGStringAudit connection string to SanteMPINoCheck below table
SANTEMPI_POSTGRESQL_PASSWORDStringSanteMPI postgreSQL passwordNoSanteDB123
SANTEMPI_POSTGRESQL_USERNAMEStringSanteMPI postgreSQL usernameNosantempi
SANTEMPI_REPMGR_PRIMARY_HOSTStringSanteMPI postgreSQL replicas manager primary hostNosantempi-psql-1
SANTEMPI_REPMGR_PARTNER_NODESStringSanteMPI postgreSQL replicas manager nodes hostsYessantempi-psql-1,santempi-psql-2,santempi-psql-
### Note diff --git a/documentation/packages/dashboard-visualiser-superset/environment-variables.md b/documentation/packages/dashboard-visualiser-superset/environment-variables.md index a1d68a54..35ce9c76 100644 --- a/documentation/packages/dashboard-visualiser-superset/environment-variables.md +++ b/documentation/packages/dashboard-visualiser-superset/environment-variables.md @@ -4,14 +4,4 @@ description: Listed in this page are all environment variables needed to run Sup # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ----------------------- | ------- | --------------------------------- | -------- | ------------------- | -| SUPERSET\_USERNAME | String | Service username | No | admin | -| SUPERSET\_FIRSTNAME | String | Admin account first name | No | SUPERSET | -| SUPERSET\_LASTNAME | String | Admin account last name | No | ADMIN | -| SUPERSET\_EMAIL | String | Admin account email address | No | admin@superset.com | -| SUPERSET\_PASSWORD | String | Admin account password | No | admin | -| SUPERSET\_API\_USERNAME | String | Service username | No | admin | -| SUPERSET\_API\_PASSWORD | String | Service password | No | admin | -| SUPERSET\_SSL | Boolean | SSL protocol requirement | No | False | -| CONFIG_\__FILE | String | Path to the dashboard import file | No | superset-export.zip | +
Variable NameTypeRelevanceRequiredDefault
SUPERSET_USERNAMEStringService usernameNoadmin
SUPERSET_FIRSTNAMEStringAdmin account first nameNoSUPERSET
SUPERSET_LASTNAMEStringAdmin account last name NoADMIN
SUPERSET_EMAILStringAdmin account email address Noadmin@superset.com
SUPERSET_PASSWORDStringAdmin account password Noadmin
SUPERSET_API_USERNAMEStringService username Noadmin
SUPERSET_API_PASSWORDStringService password Noadmin
SUPERSET_SSLBooleanSSL protocol requirementNoFalse
CONFIG_FILEStringPath to the dashboard import fileNosuperset-export.zip
diff --git a/documentation/packages/data-mapper-logstash/environment-variables.md b/documentation/packages/data-mapper-logstash/environment-variables.md index b34252fd..46c16ec9 100644 --- a/documentation/packages/data-mapper-logstash/environment-variables.md +++ b/documentation/packages/data-mapper-logstash/environment-variables.md @@ -6,14 +6,4 @@ description: Listed in this page are all environment variables needed to run Log -| Variable Name | Type | Relevance | Required | Default | -| ------------------------- | ------- | -------------------------------------------------------------------------------------------------- | ----------------------------------- | --------------------------------------- | -| LOGSTASH\_INSTANCES | Number | Number of service replicas | No | 1 | -| LOGSTASH\_DEV\_MOUNT | Boolean | DEV mount mode enabling flag | No | false | -| LOGSTASH\_PACKAGE\_PATH | String | Logstash package absolute path | yes if `LOGSTASH_DEV_MOUNT` is true | | -| LS\_JAVA\_OPTS | String | JVM heap size, it should be no less than 4GB and no more than 8GB (maximum of 50-75% of total RAM) | No | -Xmx2g -Xms2g | -| ES\_ELASTIC | String | ElasticSearch Logstash user password | Yes | dev\_password\_only | -| ES\_HOSTS | String | Elasticsearch connection string | Yes | analytics-datastore-elastic-search:9200 | -| KIBANA\_SSL | Boolean | SSL protocol requirement | No | True | -| LOGSTASH\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| LOGSTASH\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
LOGSTASH_INSTANCESNumberNumber of service replicas No1
LOGSTASH_DEV_MOUNTBooleanDEV mount mode enabling flagNofalse
LOGSTASH_PACKAGE_PATHStringLogstash package absolute pathyes if LOGSTASH_DEV_MOUNT is true
LS_JAVA_OPTSStringJVM heap size, it should be no less than 4GB and no more than 8GB (maximum of 50-75% of total RAM)No-Xmx2g -Xms2g
ES_ELASTICStringElasticSearch Logstash user passwordYesdev_password_only
ES_HOSTSStringElasticsearch connection stringYesanalytics-datastore-elastic-search:9200
KIBANA_SSLBooleanSSL protocol requirementNoTrue
LOGSTASH_MEMORY_LIMITStringRAM usage limitNo3G
LOGSTASH_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/documentation/packages/fhir-datastore-hapi-fhir/environment-variables.md b/documentation/packages/fhir-datastore-hapi-fhir/environment-variables.md index 9a3d8074..beeaa277 100644 --- a/documentation/packages/fhir-datastore-hapi-fhir/environment-variables.md +++ b/documentation/packages/fhir-datastore-hapi-fhir/environment-variables.md @@ -6,21 +6,4 @@ description: >- # Environment Variables -| Variable Name | Type | Revelance | Required | Default | -| ----------------------------- | ------ | ----------------------------------------------------------------- | -------- | --------------- | -| REPMGR\_PRIMARY\_HOST | String | Service name of the primary replication manager host (PostgreSQL) | No | postgres-1 | -| REPMGR\_PARTNER\_NODES | String | Service names of the replicas of PostgreSQL | Yes | postgres-1 | -| POSTGRES\_REPLICA\_SET | String | PostgreSQL replica set (host and port of the replicas) | Yes | postgres-1:5432 | -| HAPI\_FHIR\_CPU\_LIMIT | Number | CPU limit usage for hapi-fhir service | No | 0 (unlimited) | -| HAPI\_FHIR\_CPU\_RESERVE | Number | Reserved CPU usage for hapi-fhir service | No | 0.05 | -| HAPI\_FHIR\_MEMORY\_LIMIT | String | RAM limit usage for hapi-fhir service | No | 3G | -| HAPI\_FHIR\_MEMORY\_RESERVE | String | Reserved RAM usage for hapi-fhir service | No | 500M | -| HF\_POSTGRES\_CPU\_LIMIT | Number | CPU limit usage for postgreSQL service | No | 0 (unlimited) | -| HF\_POSTGRES\_CPU\_RESERVE | Number | Reserved CPU usage for postgreSQL service | No | 0.05 | -| HF\_POSTGRES\_MEMORY\_LIMIT | String | RAM limit usage for postgreSQL service | No | 3G | -| HF\_POSTGRES\_MEMORY\_RESERVE | String | Reserved RAM usage for hapi-fhir service | No | 500M | -| HAPI\_FHIR\_INSTANCES | Number | Number of hapi-fhir service replicas | No | 1 | -| HF\_POSTGRESQL\_USERNAME | String | Hapi-fhir PostgreSQL username | Yes | admin | -| HF\_POSTGRESQL\_PASSWORD | String | Hapi-fhir PostgreSQL password | Yes | instant101 | -| 
HF\_POSTGRESQL\_DATABASE | String | Hapi-fhir PostgreSQL database | No | hapi | -| REPMGR\_PASSWORD | Strign | hapi-fhir PostgreSQL Replication Manager username | Yes | | +
Variable NameTypeRelevanceRequiredDefault
REPMGR_PRIMARY_HOSTStringService name of the primary replication manager host (PostgreSQL)Nopostgres-1
REPMGR_PARTNER_NODESStringService names of the replicas of PostgreSQLYespostgres-1
POSTGRES_REPLICA_SETStringPostgreSQL replica set (host and port of the replicas)Yespostgres-1:5432
HAPI_FHIR_CPU_LIMITNumberCPU limit usage for hapi-fhir serviceNo0 (unlimited)
HAPI_FHIR_CPU_RESERVENumberReserved CPU usage for hapi-fhir serviceNo0.05
HAPI_FHIR_MEMORY_LIMITStringRAM limit usage for hapi-fhir serviceNo3G
HAPI_FHIR_MEMORY_RESERVEStringReserved RAM usage for hapi-fhir serviceNo500M
HF_POSTGRES_CPU_LIMITNumberCPU limit usage for postgreSQL serviceNo0 (unlimited)
HF_POSTGRES_CPU_RESERVENumberReserved CPU usage for postgreSQL serviceNo0.05
HF_POSTGRES_MEMORY_LIMITStringRAM limit usage for postgreSQL serviceNo3G
HF_POSTGRES_MEMORY_RESERVEStringReserved RAM usage for postgreSQL serviceNo500M
HAPI_FHIR_INSTANCESNumberNumber of hapi-fhir service replicas No1
HF_POSTGRESQL_USERNAMEStringHapi-fhir PostgreSQL usernameYesadmin
HF_POSTGRESQL_PASSWORDStringHapi-fhir PostgreSQL passwordYesinstant101
HF_POSTGRESQL_DATABASEStringHapi-fhir PostgreSQL databaseNohapi
REPMGR_PASSWORDStringhapi-fhir PostgreSQL Replication Manager password Yes
diff --git a/documentation/packages/interoperability-layer-openhim/environment-variables.md b/documentation/packages/interoperability-layer-openhim/environment-variables.md index c50b040a..53d19225 100644 --- a/documentation/packages/interoperability-layer-openhim/environment-variables.md +++ b/documentation/packages/interoperability-layer-openhim/environment-variables.md @@ -6,24 +6,4 @@ description: >- # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| --------------------------------- | ------ | --------------------------------------- | -------- | ------------------------------- | -| OPENHIM\_CORE\_MEDIATOR\_HOSTNAME | String | Hostname of the Openhim mediator | Yes | localhost | -| OPENHIM\_MEDIATOR\_API\_PORT | Number | Port of the Openhim mediator | Yes | 8080 | -| OPENHIM\_CORE\_INSTANCES | Number | Number of openhim-core instances | No | 1 | -| OPENHIM\_CONSOLE\_INSTANCES | String | Number of openhim-console instances | No | 1 | -| OPENHIM\_MONGO\_URL | String | MongoDB connection string | Yes | mongodb://mongo-1:27017/openhim | -| OPENHIM\_MONGO\_ATNAURL | String | ??????????? 
| Yes | mongodb://mongo-1:27017/openhim | -| OPENHIM\_CPU\_LIMIT | Number | CPU limit usage for openhim-core | No | 0 | -| OPENHIM\_CPU\_RESERVE | Number | Reserverd CPU usage for openhim-core | No | 0.05 | -| OPENHIM\_MEMORY\_LIMIT | String | RAM usage limit for openhim-core | No | 3G | -| OPENHIM\_MEMORY\_RESERVE | String | Reserved RAM for openhim-core | No | 500M | -| OPENHIM\_CONSOLE\_CPU\_LIMIT | Number | CPU limit usage for openhim-console | No | 0 | -| OPENHIM\_CONSOLE\_CPU\_RESERVE | Number | Reserverd CPU usage for openhim-console | No | 0.05 | -| OPENHIM\_CONSOLE\_MEMORY\_LIMIT | String | RAM usage limit for openhim-console | No | 2G | -| OPENHIM\_CONSOLE\_MEMORY\_RESERVE | String | Reserved RAM for openhim-console | No | 500M | -| OPENHIM\_MONGO\_CPU\_LIMIT | Number | CPU limit usage for mongo | No | 0 | -| OPENHIM\_MONGO\_CPU\_RESERVE | Number | Reserverd CPU usage for mongo | No | 0.05 | -| OPENHIM\_MONGO\_MEMORY\_LIMIT | String | RAM usage limit for mongo | No | 3G | -| OPENHIM\_MONGO\_MEMORY\_RESERVE | String | Reserved RAM for mongo | No | 500M | -| MONGO\_SET\_COUNT | Number | Number of instances of Mongo | YES | 1 | +
Variable NameTypeRelevanceRequiredDefault
OPENHIM_CORE_MEDIATOR_HOSTNAMEStringHostname of the Openhim mediatorYeslocalhost
OPENHIM_MEDIATOR_API_PORTNumberPort of the Openhim mediatorYes8080
OPENHIM_CORE_INSTANCESNumberNumber of openhim-core instancesNo1
OPENHIM_CONSOLE_INSTANCESStringNumber of openhim-console instancesNo1
OPENHIM_MONGO_URLStringMongoDB connection stringYesmongodb://mongo-1:27017/openhim
OPENHIM_MONGO_ATNAURLString???????????Yesmongodb://mongo-1:27017/openhim
OPENHIM_CPU_LIMITNumberCPU limit usage for openhim-coreNo0
OPENHIM_CPU_RESERVENumberReserved CPU usage for openhim-coreNo0.05
OPENHIM_MEMORY_LIMITStringRAM usage limit for openhim-coreNo3G
OPENHIM_MEMORY_RESERVEStringReserved RAM for openhim-coreNo500M
OPENHIM_CONSOLE_CPU_LIMITNumberCPU limit usage for openhim-consoleNo0
OPENHIM_CONSOLE_CPU_RESERVENumberReserved CPU usage for openhim-consoleNo0.05
OPENHIM_CONSOLE_MEMORY_LIMITStringRAM usage limit for openhim-consoleNo2G
OPENHIM_CONSOLE_MEMORY_RESERVEStringReserved RAM for openhim-consoleNo500M
OPENHIM_MONGO_CPU_LIMITNumberCPU limit usage for mongoNo0
OPENHIM_MONGO_CPU_RESERVENumberReserved CPU usage for mongoNo0.05
OPENHIM_MONGO_MEMORY_LIMITStringRAM usage limit for mongoNo3G
OPENHIM_MONGO_MEMORY_RESERVEStringReserved RAM for mongoNo500M
MONGO_SET_COUNTNumberNumber of instances of MongoYES1
diff --git a/documentation/packages/kafka-mapper-consumer/environment-variables.md b/documentation/packages/kafka-mapper-consumer/environment-variables.md index 13af9960..71e06e28 100644 --- a/documentation/packages/kafka-mapper-consumer/environment-variables.md +++ b/documentation/packages/kafka-mapper-consumer/environment-variables.md @@ -8,9 +8,4 @@ description: >- -| Variable Name | Type | Relevance | Required | Default | -| ---------------- | ------ | ------------------- | -------- | ------------------------------ | -| KAFKA\_HOST | String | Kafka hostname | No | kafka | -| KAFKA\_PORT | Number | Kafka port | No | 9092 | -| CLICKHOUSE\_HOST | String | Clickhouse hostname | No | analytics-datastore-clickhouse | -| CLICKHOUSE\_PORT | String | Clickhouse port | No | 8123 | +
Variable NameTypeRelevanceRequiredDefault
KAFKA_HOSTStringKafka hostnameNokafka
KAFKA_PORTNumberKafka portNo9092
CLICKHOUSE_HOSTStringClickhouse hostnameNoanalytics-datastore-clickhouse
CLICKHOUSE_PORTStringClickhouse portNo8123
diff --git a/documentation/packages/kafka-unbundler-consumer/environment-variables.md b/documentation/packages/kafka-unbundler-consumer/environment-variables.md index be17c79e..493461b8 100644 --- a/documentation/packages/kafka-unbundler-consumer/environment-variables.md +++ b/documentation/packages/kafka-unbundler-consumer/environment-variables.md @@ -4,7 +4,4 @@ description: A kafka processor to unbundle resources into their own kafka topics # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ------------- | ------ | -------------- | -------- | ------- | -| KAFKA\_HOST | String | Kafka hostname | No | kafka | -| KAFKA\_PORT | Number | Kafka port | No | 9092 | +
Variable NameTypeRelevanceRequiredDefault
KAFKA_HOSTStringKafka hostnameNokafka
KAFKA_PORTNumberKafka portNo9092
diff --git a/documentation/packages/message-bus-helper-hapi-proxy/environment-variables.md b/documentation/packages/message-bus-helper-hapi-proxy/environment-variables.md index 7b2f4a91..9dd2c1a8 100644 --- a/documentation/packages/message-bus-helper-hapi-proxy/environment-variables.md +++ b/documentation/packages/message-bus-helper-hapi-proxy/environment-variables.md @@ -4,13 +4,4 @@ description: Listed in this page are all environment variables needed to run Hap # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ------------------------------ | ------ | -------------------------------------- | -------- | -------------------------- | -| HAPI\_SERVER\_URL | String | Hapi-fhir server URL | No | http://hapi-fhir:8080/fhir | -| KAFKA\_BOOTSTRAP\_SERVERS | String | Kafka server | No | kafka:9092 | -| HAPI\_SERVER\_VALIDATE\_FORMAT | String | Path to the service configuration file | No | kibana-kibana.yml | -| HAPI\_PROXY\_INSTANCES | Number | Number of instances of hapi-proxy | No | 1 | -| HAPI\_PROXY\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| HAPI\_PROXY\_CPU\_RESERVE | Number | Reserved CPU usage | No | 0.05 | -| HAPI\_PROXY\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| HAPI\_PROXY\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
HAPI_SERVER_URLStringHapi-fhir server URLNohttp://hapi-fhir:8080/fhir
KAFKA_BOOTSTRAP_SERVERSStringKafka serverNokafka:9092
HAPI_SERVER_VALIDATE_FORMATStringPath to the service configuration fileNokibana-kibana.yml
HAPI_PROXY_INSTANCESNumberNumber of instances of hapi-proxyNo1
HAPI_PROXY_CPU_LIMITNumberCPU usage limitNo0
HAPI_PROXY_CPU_RESERVENumberReserved CPU usageNo0.05
HAPI_PROXY_MEMORY_LIMITStringRAM usage limitNo3G
HAPI_PROXY_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/documentation/packages/message-bus-kafka/environment-variables.md b/documentation/packages/message-bus-kafka/environment-variables.md index db5368e9..79ad9377 100644 --- a/documentation/packages/message-bus-kafka/environment-variables.md +++ b/documentation/packages/message-bus-kafka/environment-variables.md @@ -6,27 +6,5 @@ description: >- # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| -------------------------- | ------ | ---------------- | -------- | ------- | -| KAFKA\_INSTANCES | Number | Service replicas | No | 1 | -| KAFKA\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| KAFKA\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| KAFKA\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| KAFKA\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | -| KAFKA\_TOPICS | String | Kafka topics | Yes | | -| | | | | | -| ZOOKEEPER\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| ZOOKEEPER\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| ZOOKEEPER\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| ZOOKEEPER\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | -| | | | | | -| KMINION\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| KMINION\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| KMINION\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| KMINION\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | -| | | | | | -| KAFDROP\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| KAFDROP\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| KAFDROP\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| KAFDROP\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
KAFKA_INSTANCESNumberService replicasNo1
KAFKA_CPU_LIMITNumberCPU usage limitNo0
KAFKA_CPU_RESERVENumberReserved CPUNo0.05
KAFKA_MEMORY_LIMITStringRAM usage limitNo3G
KAFKA_MEMORY_RESERVEStringReserved RAMNo500M
KAFKA_TOPICSStringKafka topicsYes
ZOOKEEPER_CPU_LIMITNumberCPU usage limitNo0
ZOOKEEPER_CPU_RESERVENumberReserved CPUNo0.05
ZOOKEEPER_MEMORY_LIMITStringRAM usage limitNo3G
ZOOKEEPER_MEMORY_RESERVEStringReserved RAMNo500M
KMINION_CPU_LIMITNumberCPU usage limitNo0
KMINION_CPU_RESERVENumberReserved CPUNo0.05
KMINION_MEMORY_LIMITStringRAM usage limitNo3G
KMINION_MEMORY_RESERVEStringReserved RAMNo500M
KAFDROP_CPU_LIMITNumberCPU usage limitNo0
KAFDROP_CPU_RESERVENumberReserved CPUNo0.05
KAFDROP_MEMORY_LIMITStringRAM usage limitNo3G
KAFDROP_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/documentation/packages/monitoring/environment-variables.md b/documentation/packages/monitoring/environment-variables.md index 2c09a916..c20483f0 100644 --- a/documentation/packages/monitoring/environment-variables.md +++ b/documentation/packages/monitoring/environment-variables.md @@ -6,7 +6,4 @@ description: >- # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ----------------------------- | ------ | --------------------------- | -------- | ------------------- | -| GF\_SECURITY\_ADMIN\_USER | String | Username of Grafana service | No | admin | -| GF\_SECURITY\_ADMIN\_PASSWORD | String | Password of Grafana service | No | dev\_password\_only | +
Variable NameTypeRelevanceRequiredDefault
GF_SECURITY_ADMIN_USERStringUsername of Grafana serviceNoadmin
GF_SECURITY_ADMIN_PASSWORDStringPassword of Grafana serviceNodev_password_only
diff --git a/documentation/packages/reverse-proxy-nginx/environment-variables.md b/documentation/packages/reverse-proxy-nginx/environment-variables.md index 8aaa9a31..0c34bbd2 100644 --- a/documentation/packages/reverse-proxy-nginx/environment-variables.md +++ b/documentation/packages/reverse-proxy-nginx/environment-variables.md @@ -6,14 +6,4 @@ description: >- # Environment Variables -| Variable Name | Type | Relevance | Required | Default | -| ------------------------- | ------ | ------------------------------------------------- | -------- | --------- | -| DOMAIN\_NAME | String | Domain name | Yes | localhost | -| SUBDOMAINS | String | Subdomain names | Yes | | -| RENEWAL\_EMAIL | String | Renewal email | Yes | | -| REVERSE\_PROXY\_INSTANCES | Number | Number of instances | No | 1 | -| STAGING | String | Generate fake or real certificate (true for fake) | No | false | -| NGINX\_CPU\_LIMIT | Number | CPU usage limit | No | 0 | -| NGINX\_CPU\_RESERVE | Number | Reserved CPU | No | 0.05 | -| NGINX\_MEMORY\_LIMIT | String | RAM usage limit | No | 3G | -| NGINX\_MEMORY\_RESERVE | String | Reserved RAM | No | 500M | +
Variable NameTypeRelevanceRequiredDefault
DOMAIN_NAMEStringDomain nameYeslocalhost
SUBDOMAINSStringSubdomain namesYes
RENEWAL_EMAILStringRenewal emailYes
REVERSE_PROXY_INSTANCESNumberNumber of instancesNo1
STAGINGStringGenerate fake or real certificate (true for fake)Nofalse
NGINX_CPU_LIMITNumberCPU usage limitNo0
NGINX_CPU_RESERVENumberReserved CPUNo0.05
NGINX_MEMORY_LIMITStringRAM usage limitNo3G
NGINX_MEMORY_RESERVEStringReserved RAMNo500M
diff --git a/documentation/provisioning-up-remote-servers/README.md b/documentation/provisioning-up-remote-servers/README.md index 51e9d9b5..0732f007 100644 --- a/documentation/provisioning-up-remote-servers/README.md +++ b/documentation/provisioning-up-remote-servers/README.md @@ -1,12 +1,14 @@ --- -description: A package for deployment tools. +description: Infrastructure tools for the OpenHIM Platform --- -# Provisioning up remote servers +# Provisioning remote servers -This package contains two folders: ansible and terraform. +As part of the OpenHIM Platform Github repository we also provide scripts to easily setup new servers. The Terraform script are able to instanciate server in AWS and the Ansible script are able to configure those server to be ready to accept OpenHIM Platform packages. -## Ansible +## Ansible + +See [here](https://github.com/jembi/platform/tree/main/infrastructure/ansible). It is used for: @@ -19,4 +21,4 @@ In the inventories, there is different environment configuration (development, p ## Terraform -It is used to create and set AWS servers. +Is used to create and set AWS servers. See [here](https://github.com/jembi/platform/tree/main/infrastructure/terraform). diff --git a/documentation/recipes/README.md b/documentation/recipes/README.md new file mode 100644 index 00000000..5382f24f --- /dev/null +++ b/documentation/recipes/README.md @@ -0,0 +1,13 @@ +--- +description: Pre-defined recipes for common use cases +--- + +# 📜 Recipes + +OpenHIM platform comes bundles with a set of generic packages that can be deployed and configured to support a number of different use cases. To help users of OpenHIM Platform get started with something they can make use of immediately, a number of default OpenHIM Platform reciepes are provided. These help you get started with everything you need setup and configured for a particular use case. + +We current support the following default recipes: + + + +
Central Data Repository with Data WarehouseA FHIR-based Shared Health record linked to an MPI for linking and matching patient demographics and a default reporting pipeline to transform and visualise FHIR data.central-data-repository-with-data-warehousing.md
Central Data RepositoryA FHIR-based Shared Health record linked to an MPI for linking and matching patient demographics. No reporting is include but all FHIR data is pushed to Kafka for external system to use.central-data-repository-no-reporting.md
Master Patient IndexA master patient index setup using JeMPI. It also includes OpenHIM as the API gateway providing security, a mapping mediator to allow FHIR-based communication with JeMPI and Keycloak to support user management.master-patient-index.md
diff --git a/documentation/recipes/central-data-repository-no-reporting.md b/documentation/recipes/central-data-repository-no-reporting.md new file mode 100644 index 00000000..b827717b --- /dev/null +++ b/documentation/recipes/central-data-repository-no-reporting.md @@ -0,0 +1,20 @@ +# Central Data repository (no reporting) + +{% hint style="warning" %} +Note: This recipe is in a pre-release alpha stage. It's usable but do so at your own risk. +{% endhint %} + +This recipe sets up an HIE that does the following: + +* Accept FHIR bundles submitted securely through an IOL (OpenHIM) +* Stores Clinical FHIR data to a FHIR store (HAPI FHIR) +* Stores Patient Demographic data to an MPI (JeMPI) +* Pushes FHIR resources to Kafka for other external systems to use + +To launch this package in dev mode copy and paste this into your terminal in a new folder (ensure you have the [instant CLI installed](https://jembi.gitbook.io/instant-v2/getting-started/quick-start)): + +```bash +wget https://raw.githubusercontent.com/jembi/platform/main/cdr.env && \ +wget https://raw.githubusercontent.com/jembi/platform/main/config.yaml && \ +instant package init -p cdr --dev +``` diff --git a/documentation/recipes/central-data-repository-with-data-warehousing.md b/documentation/recipes/central-data-repository-with-data-warehousing.md new file mode 100644 index 00000000..1efcf88e --- /dev/null +++ b/documentation/recipes/central-data-repository-with-data-warehousing.md @@ -0,0 +1,49 @@ +# Central Data Repository with Data Warehousing + + + +{% hint style="warning" %} +Note: This recipe is in a pre-release alpha stage. It's usable but do so at your own risk. 
+{% endhint %} + +This recipe sets up an HIE that does the following: + +* Accept FHIR bundles submitted securely through an IOL (OpenHIM) +* Stores Clinical FHIR data to a FHIR store (HAPI FHIR) +* Stores Patient Demographic data to an MPI (JeMPI) +* Pushes FHIR resources to Kafka for the reporting pipeline (and other systems) to use +* Pulls FHIR data out of Kafka and maps it to flattened tables in the Data Warehouse (Clickhouse) +* Allows for the Data Warehouse data to be visualised via a BI tool (Apache Superset) + +To launch this package in dev mode copy and paste this into your terminal in a new folder (ensure you have the [instant CLI installed](https://jembi.gitbook.io/instant-v2/getting-started/quick-start)): + +```bash +wget https://raw.githubusercontent.com/jembi/platform/main/cdr-dw.env && \ +wget https://raw.githubusercontent.com/jembi/platform/main/config.yaml && \ +instant package init -p cdr-dw --dev +``` + +## Services + +When deployed in `--dev` mode the location of the UIs will be as follows: + +| Service | URL | Auth | +| -------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------- | +| OpenHIM | [http://localhost:9000/](http://localhost:9000/) |

Test SSO user:
u: test p: dev_password_only

| +| JeMPI | [http://localhost:3033/](http://localhost:3033/) |

Test SSO user:
u: test p: dev_password_only

| +| Superset | [http://localhost:8089/](http://localhost:8089/) |

Test SSO user:
u: test p: dev_password_only

| +| Grafana | [http://localhost:3000/](http://localhost:3000/) |

Test SSO user:
u: test p: dev_password_only

| +| Keycloak | [http://localhost:9088/admin/master/console/#/platform-realm](http://localhost:9088/admin/master/console/#/platform-realm) | u: admin p: dev\_password\_only | + +Extra UIs only exposed in `--dev` mode: + +| Service | URL | Auth | +| --------- | ------------------------------------------------ | ---- | +| Kafdrop | [http://localhost:9013/](http://localhost:9013/) | none | +| HAPI FHIR | [http://localhost:3447/](http://localhost:3447/) | none | + +## Example use + +Use the following example postman collection to see interaction you cna have with the system and see how the system reacts. + +{% embed url="https://www.postman.com/jembi-platform/workspace/jembi-public/collection/23372581-055117db-6827-43d8-bc50-86f06f5a54c6?action=share&creator=23372581" %} diff --git a/documentation/recipes/master-patient-index.md b/documentation/recipes/master-patient-index.md new file mode 100644 index 00000000..44a7124e --- /dev/null +++ b/documentation/recipes/master-patient-index.md @@ -0,0 +1,15 @@ +# Master Patient Index + +{% hint style="warning" %} +Note: This recipe is in a pre-release alpha stage. It's usable but do so at your own risk. +{% endhint %} + +This recipe sets up an HIE that deploys JeMPI behind the OpenHIM with a mapping mediator configured to allow for FHIR-based communication with JeMPI. It also deploys Keycloak for user management and authentication. 
+ +To launch this package in dev mode copy and paste this into your terminal in a new folder (ensure you have the [instant CLI installed](https://jembi.gitbook.io/instant-v2/getting-started/quick-start)): + +```bash +wget https://raw.githubusercontent.com/jembi/platform/main/mpi.env && \ +wget https://raw.githubusercontent.com/jembi/platform/main/config.yaml && \ +instant package init -p mpi --dev +``` diff --git a/fhir-datastore-hapi-fhir/Dockerfile b/fhir-datastore-hapi-fhir/Dockerfile index 63e555d0..0399fa1d 100644 --- a/fhir-datastore-hapi-fhir/Dockerfile +++ b/fhir-datastore-hapi-fhir/Dockerfile @@ -1,4 +1,4 @@ -FROM hapiproject/hapi:v6.0.1 +FROM hapiproject/hapi:v6.10.1 # Copy the static shell into base image COPY --from=busybox:1.36.0-uclibc /bin/sh /bin/sh diff --git a/fhir-datastore-hapi-fhir/docker-compose.yml b/fhir-datastore-hapi-fhir/docker-compose.yml index fd91cc69..fed04b75 100644 --- a/fhir-datastore-hapi-fhir/docker-compose.yml +++ b/fhir-datastore-hapi-fhir/docker-compose.yml @@ -19,6 +19,8 @@ services: - spring.datasource.hikari.idleTimeout=600000 - hapi.fhir.allow_external_references=true - hapi.fhir.bulk_export_enabled=true + - hapi.fhir.ips_enabled=${IPS_ENABLED} + - hapi.fhir.ig_runtime_upload_enabled=${ENABLE_RUNTIME_IG_UPLOAD} - hapi.fhir.enable_repository_validating_interceptor=true - hapi.fhir.fhir_version=${FHIR_VERSION} - JAVA_TOOL_OPTIONS=${HF_JAVA_OPTS} diff --git a/fhir-datastore-hapi-fhir/package-metadata.json b/fhir-datastore-hapi-fhir/package-metadata.json index 040bbe71..241b141a 100644 --- a/fhir-datastore-hapi-fhir/package-metadata.json +++ b/fhir-datastore-hapi-fhir/package-metadata.json @@ -34,10 +34,12 @@ "HF_PGPOOL_CPU_RESERVE": "0.05", "HF_PGPOOL_MEMORY_RESERVE": "50M", "HF_JAVA_OPTS": "-Xmx2g", - "HF_IMAGE_TAG": "v6.0.1-wget", + "HF_IMAGE_TAG": "v6.10.1-wget", "HF_MAX_POOL_SIZE": "80", "HF_PGPOOL_ENABLED": "true", "HF_POSTGRES_FAILOVER": "automatic", - "HF_POSTGRES_DEGRADED_MONITORING_TIMEOUT": "5" + 
"HF_POSTGRES_DEGRADED_MONITORING_TIMEOUT": "5", + "IPS_ENABLED": "true", + "ENABLE_RUNTIME_IG_UPLOAD": "false" } } diff --git a/identity-access-manager-keycloak/package-metadata.json b/identity-access-manager-keycloak/package-metadata.json index 2a9196c7..928ca89a 100644 --- a/identity-access-manager-keycloak/package-metadata.json +++ b/identity-access-manager-keycloak/package-metadata.json @@ -26,7 +26,7 @@ "KC_GRAFANA_CLIENT_SECRET": "CV14QfwnpYFj1IH5dK5lScPNCYAIYP1c", "KC_GRAFANA_ROOT_URL": "http://localhost:3000", "KC_GRAFANA_CLIENT_ROLES": "admin,editor,viewer", - "KC_JEMPI_SSO_ENABLED": "true", + "KC_JEMPI_SSO_ENABLED": "false", "KC_JEMPI_CLIENT_ID": "jempi-oauth", "KC_JEMPI_CLIENT_SECRET": "Tbe3llP5OJIlqUjz7K1wPp8YDAdCOEMn", "KC_JEMPI_ROOT_URL": "http://localhost:3033", diff --git a/infrastructure/ansible/inventories/development/group_vars/all.yml b/infrastructure/ansible/inventories/development/group_vars/all.yml index ad00f74f..854733f1 100644 --- a/infrastructure/ansible/inventories/development/group_vars/all.yml +++ b/infrastructure/ansible/inventories/development/group_vars/all.yml @@ -8,22 +8,6 @@ sudoers: username: ryancrichton state: present key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCrGR7DPVKSIMjWO7m8vq6PFBJT8vmiiJRguePcuff8DUvUYmH/imAcvlcswZvnrlZgEtHLdleYKii4Fc/HcNwfoQOdxbdDbRamxhdSr+sHa4V97ziMi1+eXdJOItXjF9+ZyeFG0P2d8BH+6+X8NZ9q+3iM4iEg/08LqXyLU52rcRutmHo+3K0pZQETHOeiiIxBHzgtlDZIMclw12U5NU/f7W5+9jfDatLDhIO13x4iTeES0d5B5RivBtJoQb1y0glqgVtrCDuRgMk/p51Ravf7a8eFHgcVWYhcVqjakOviWMeHHnPI4IQSLK2yXGArsMurOQwoZ16r03Vse9mf3rmrLf30fD6cWdyRxgSkuWCDqJUvGQiGIJfybfsPLz/EPSvGjRx96DFPoqZ5meuLZW6bvQx5FjTLUU/G5KKvFujEkhL0WPTZO+XikIJVgvMp7DtgbOqRCDYcJM1F2oyccAnitmh0k43O2VkpKN8HYzVM1YAl7oxY4XdWbuAvZpNi7814WaM3/SCo5iKi39EbuQ0m8mDWloxroDtpc3RtY6gRWjx8exndpd6gD8yEKxSuzIwpdaWp1gGreCs+0/QPu4QjhQQuKPfrOKksJhaNF59i1Ww9RtRYW8R8wkgJYznxAKlyWDcYMc2PIvFPLLAvOD9JLouCBwjF3h07EFYZ+90wUw== - - name: michael.loosen@jembi.org - username: michaelloosen - state: present - key: 
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINGNVNlWwpQKVXPSngEOOdJjuCyVEQoBXv+nHqmcM6vW - - name: barry.dwyer@jembi.org - username: barrydwyer - state: absent - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICw4vvV33xxrTzxAiPoRhuwyWUzmgrP5NPo2n4bmKx7P - - name: mark.labuschagne@jembi.org - username: marklabuschagne - state: absent - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHa1ghm9JsRl6JDtWU8AV+U70jehqlWaXmpjRg3afqB4 - - name: castello.govender@jembi.org - username: castellogovender - state: absent - key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDWl3Vc+v0fiuVBIVTDehGGUgZqfvn0JG2oNS7oMV9X2s3p8VwndKm6/oa5bOO4+sp9Z/8ychvo7S2MNXBRQAzTEXo5Oej5E/aZqaFzvpjgI79cnSidIHTzOUw0rUk2EwpSCpJVSXCjSjxGLbbLI35+KltLVoNXGmpNFq4Xi7XwT9aXIm5KQcbumV1NFRGPl+XiaRHioo37gFtub9XjODdQFfs9KGMS2oEuzzFFaXj5unUDZo+lZlEp67HHwq6EpDLR5xf7l96xBvMpQ/MS2pTxCoHVqXx3IDL5CXBBrgUKS8lUuhPPUJxPeARz4DuU7SmWxhNwTDspAEROQ8Rsq6M+kvdslFVeJ4O5M2Rs75mDTUCAkmZTixkTMuMmWqBuQT3zPA7d8Gf0YLhKfwgyH8y5YeJgpU01o2wbH6em/n2isI+pSIatdUFQAKuFdzJiawQJEB5UobKwN2tKVP8ImjrfN+q8nuqHZwTTGW0AaCrb985aQFTgMbYwQeR3mkBNOE0= - name: lumiere@symbionix.co username: lumieremondo state: present @@ -36,6 +20,10 @@ sudoers: username: arran state: present key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB8edPB2Qhi0DGEgSTel4yNo9Wa68g4UtHYipaVfaMN8 arran@LAPTOP-2GA5K346 + - name: matthew.erispe@jembi.org + username: matthewerispe + state: present + key: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCwfQ8ypu5+4rGNfMeSzB+ADffE4qT/519AFVDTIWgV4FaJipsgHdYFTVnxkJHAiixTKAqEJ2SF1RJ2/pIH2hX5p2GNWhrXuOmnqiOZEByPSsqidSNS8aMCfIzt7UZ6XvIPhagZnEQ7NCA1BOuOqzWISIEKjSqA3MvfZ5kniis5J9cbTqkGB+pgW75fuQIxtmK7iCZbwuXOO735PzkKgeT/vfg4fTlbmOeikSNO6QaamGRZ+NJfprCIx5j5GKc970k6V9d4RcQltHojgWI7Wl2VRLsprm7Xy2keaTGXfjnCDwQ2cQRUgBqIoWyDoUzNqvDYXrEAfM19sNEICMx90fXaJeGrNbMg2jjWuyPacSnB0moDtECt4zd2svozGEWnVvdY8KDAWgMbTUlrufNOzh7hyrDMpaBSmSN9npyTpekM0kWlykfU++oEexXMi41b0cZLJc9ocPJCeKZ0fzpachHW35zsUh2E6OCgbXdDxNugVrG4ecz9nUqjJGyYhCX6mhz9VxpB46wEPQ/E2rTsnIfd+xMYoyLqLzCfKf/Z8CL8/Ifq7uDRxczRmo5sCrjByOdwyUGY+JoFQMc5OcEzfyepVmcwfuhuASBGAZOr1zM1ituqG614d7W+GDRHwCeAS8zUZKFdYvL5C4nOg+OxojqnC8zSGwN3ZcnMXHvvzKdQHw== docker_users: - name: bradford.sawadye@jembi.org @@ -46,22 +34,6 @@ docker_users: username: ryancrichton state: present key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCrGR7DPVKSIMjWO7m8vq6PFBJT8vmiiJRguePcuff8DUvUYmH/imAcvlcswZvnrlZgEtHLdleYKii4Fc/HcNwfoQOdxbdDbRamxhdSr+sHa4V97ziMi1+eXdJOItXjF9+ZyeFG0P2d8BH+6+X8NZ9q+3iM4iEg/08LqXyLU52rcRutmHo+3K0pZQETHOeiiIxBHzgtlDZIMclw12U5NU/f7W5+9jfDatLDhIO13x4iTeES0d5B5RivBtJoQb1y0glqgVtrCDuRgMk/p51Ravf7a8eFHgcVWYhcVqjakOviWMeHHnPI4IQSLK2yXGArsMurOQwoZ16r03Vse9mf3rmrLf30fD6cWdyRxgSkuWCDqJUvGQiGIJfybfsPLz/EPSvGjRx96DFPoqZ5meuLZW6bvQx5FjTLUU/G5KKvFujEkhL0WPTZO+XikIJVgvMp7DtgbOqRCDYcJM1F2oyccAnitmh0k43O2VkpKN8HYzVM1YAl7oxY4XdWbuAvZpNi7814WaM3/SCo5iKi39EbuQ0m8mDWloxroDtpc3RtY6gRWjx8exndpd6gD8yEKxSuzIwpdaWp1gGreCs+0/QPu4QjhQQuKPfrOKksJhaNF59i1Ww9RtRYW8R8wkgJYznxAKlyWDcYMc2PIvFPLLAvOD9JLouCBwjF3h07EFYZ+90wUw== - - name: michael.loosen@jembi.org - username: michaelloosen - state: present - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINGNVNlWwpQKVXPSngEOOdJjuCyVEQoBXv+nHqmcM6vW - - name: barry.dwyer@jembi.org - username: barrydwyer - state: absent - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICw4vvV33xxrTzxAiPoRhuwyWUzmgrP5NPo2n4bmKx7P - - name: mark.labuschagne@jembi.org - username: marklabuschagne - state: absent - key: ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIHa1ghm9JsRl6JDtWU8AV+U70jehqlWaXmpjRg3afqB4 - - name: castello.govender@jembi.org - username: castellogovender - state: absent - key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDWl3Vc+v0fiuVBIVTDehGGUgZqfvn0JG2oNS7oMV9X2s3p8VwndKm6/oa5bOO4+sp9Z/8ychvo7S2MNXBRQAzTEXo5Oej5E/aZqaFzvpjgI79cnSidIHTzOUw0rUk2EwpSCpJVSXCjSjxGLbbLI35+KltLVoNXGmpNFq4Xi7XwT9aXIm5KQcbumV1NFRGPl+XiaRHioo37gFtub9XjODdQFfs9KGMS2oEuzzFFaXj5unUDZo+lZlEp67HHwq6EpDLR5xf7l96xBvMpQ/MS2pTxCoHVqXx3IDL5CXBBrgUKS8lUuhPPUJxPeARz4DuU7SmWxhNwTDspAEROQ8Rsq6M+kvdslFVeJ4O5M2Rs75mDTUCAkmZTixkTMuMmWqBuQT3zPA7d8Gf0YLhKfwgyH8y5YeJgpU01o2wbH6em/n2isI+pSIatdUFQAKuFdzJiawQJEB5UobKwN2tKVP8ImjrfN+q8nuqHZwTTGW0AaCrb985aQFTgMbYwQeR3mkBNOE0= - name: lumiere@symbionix.co username: lumieremondo state: present @@ -74,8 +46,12 @@ docker_users: username: arran state: present key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB8edPB2Qhi0DGEgSTel4yNo9Wa68g4UtHYipaVfaMN8 arran@LAPTOP-2GA5K346 + - name: matthew.erispe@jembi.org + username: matthewerispe + state: present + key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCwfQ8ypu5+4rGNfMeSzB+ADffE4qT/519AFVDTIWgV4FaJipsgHdYFTVnxkJHAiixTKAqEJ2SF1RJ2/pIH2hX5p2GNWhrXuOmnqiOZEByPSsqidSNS8aMCfIzt7UZ6XvIPhagZnEQ7NCA1BOuOqzWISIEKjSqA3MvfZ5kniis5J9cbTqkGB+pgW75fuQIxtmK7iCZbwuXOO735PzkKgeT/vfg4fTlbmOeikSNO6QaamGRZ+NJfprCIx5j5GKc970k6V9d4RcQltHojgWI7Wl2VRLsprm7Xy2keaTGXfjnCDwQ2cQRUgBqIoWyDoUzNqvDYXrEAfM19sNEICMx90fXaJeGrNbMg2jjWuyPacSnB0moDtECt4zd2svozGEWnVvdY8KDAWgMbTUlrufNOzh7hyrDMpaBSmSN9npyTpekM0kWlykfU++oEexXMi41b0cZLJc9ocPJCeKZ0fzpachHW35zsUh2E6OCgbXdDxNugVrG4ecz9nUqjJGyYhCX6mhz9VxpB46wEPQ/E2rTsnIfd+xMYoyLqLzCfKf/Z8CL8/Ifq7uDRxczRmo5sCrjByOdwyUGY+JoFQMc5OcEzfyepVmcwfuhuASBGAZOr1zM1ituqG614d7W+GDRHwCeAS8zUZKFdYvL5C4nOg+OxojqnC8zSGwN3ZcnMXHvvzKdQHw== -firewall_subnet_restriction: '10.1.10.0/16' +firewall_subnet_restriction: "10.1.10.0/16" # docker_swarm_hostname_1: ip-172-31-36-41 # docker_swarm_hostname_2: ip-172-31-35-12 @@ -85,9 +61,9 @@ firewall_subnet_restriction: '10.1.10.0/16' 
docker_username: ethiopiacdrbot docker_email: ryan+ethiopiacdrbot@jembi.org docker_password: !vault | - $ANSIBLE_VAULT;1.1;AES256 - 34323064326163323965306565316239366335633632633862333339323965633539376162623138 - 3166356333333635656337383236306535343164636632640a323031326166653339663162663763 - 39356130373863326163306139343332396262353036333532303530383363376237336138336136 - 3262303664386464380a613834373538376639373330363361396165333533343137613464653761 - 36383536633362653535343666333731333630383639613065393935346336613636 + $ANSIBLE_VAULT;1.1;AES256 + 34323064326163323965306565316239366335633632633862333339323965633539376162623138 + 3166356333333635656337383236306535343164636632640a323031326166653339663162663763 + 39356130373863326163306139343332396262353036333532303530383363376237336138336136 + 3262303664386464380a613834373538376639373330363361396165333533343137613464653761 + 36383536633362653535343666333731333630383639613065393935346336613636 diff --git a/interoperability-layer-openhim/docker-compose.yml b/interoperability-layer-openhim/docker-compose.yml index 1d55306b..2a2073f3 100644 --- a/interoperability-layer-openhim/docker-compose.yml +++ b/interoperability-layer-openhim/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.9' services: openhim-core: - image: jembi/openhim-core:v8.3.0 + image: jembi/openhim-core:v8.4.0 networks: kafka: hapi-fhir: @@ -17,7 +17,8 @@ services: - api_authenticationTypes=["token", "basic", "openid", "local"] - authentication_enableCustomTokenAuthentication=true - router_kafkaBrokers=${KAFKA_HOSTS} - - api_openid_url=${KC_FRONTEND_URL}/realms/${KC_REALM_NAME} + - api_openid_url=${KC_API_URL}/realms/${KC_REALM_NAME} + - api_openid_issuerUrl=${KC_FRONTEND_URL}/realms/${KC_REALM_NAME} - api_openid_callbackUrl=${KC_OPENHIM_ROOT_URL} - api_openid_clientId=${KC_OPENHIM_CLIENT_ID} - api_openid_clientSecret=${KC_OPENHIM_CLIENT_SECRET} @@ -46,6 +47,7 @@ services: KC_OPENHIM_CLIENT_ID: ${KC_OPENHIM_CLIENT_ID} KC_REALM_NAME: ${KC_REALM_NAME} 
KC_FRONTEND_URL: ${KC_FRONTEND_URL} + OPENHIM_CONSOLE_SHOW_LOGIN: ${OPENHIM_CONSOLE_SHOW_LOGIN} networks: reverse-proxy: keycloak: diff --git a/interoperability-layer-openhim/package-metadata.json b/interoperability-layer-openhim/package-metadata.json index 286e34eb..3158f12f 100644 --- a/interoperability-layer-openhim/package-metadata.json +++ b/interoperability-layer-openhim/package-metadata.json @@ -19,6 +19,7 @@ "OPENHIM_CONSOLE_CPU_RESERVE": "0.05", "OPENHIM_CONSOLE_MEMORY_LIMIT": "2G", "OPENHIM_CONSOLE_MEMORY_RESERVE": "500M", + "OPENHIM_CONSOLE_SHOW_LOGIN": "true", "OPENHIM_MONGO_CPU_LIMIT": "0", "OPENHIM_MONGO_CPU_RESERVE": "0.05", "OPENHIM_MONGO_MEMORY_LIMIT": "2G", @@ -32,6 +33,6 @@ "KC_OPENHIM_CLIENT_ID": "openhim-oauth", "KC_OPENHIM_CLIENT_SECRET": "tZKfEbWf0Ka5HBNZwFrdSyQH2xT1sNMR", "KC_OPENHIM_ROOT_URL": "http://localhost:9000", - "KC_REALM_FULL_URL": "http://localhost:9088/realms/platform-realm" + "KC_API_URL": "http://identity-access-manager-keycloak:8080" } } diff --git a/interoperability-layer-openhim/swarm.sh b/interoperability-layer-openhim/swarm.sh index f96dd679..cce4fe6c 100644 --- a/interoperability-layer-openhim/swarm.sh +++ b/interoperability-layer-openhim/swarm.sh @@ -57,7 +57,7 @@ function initialize_package() { else config::await_service_running "mongo-1" "${COMPOSE_FILE_PATH}"/docker-compose.await-helper-mongo.yml "1" "$STACK" - try "docker exec -i $(docker ps -q -f name=openhim_mongo) mongo --eval \"rs.initiate()\"" throw "Could not initiate replica set for the single mongo instance. Some services use \ + try "docker exec -i $(docker ps -q -f name=openhim_mongo) mongo --eval \"rs.initiate({'_id': 'mongo-set','members': [{'_id': 0,'priority': 1,'host': 'mongo-1:27017'}]})\"" throw "Could not initiate replica set for the single mongo instance. 
Some services use \ mongo event listeners which only work with a replica set" fi fi diff --git a/kafka-mapper-consumer/docker-compose.yml b/kafka-mapper-consumer/docker-compose.yml index a0f6cdfe..9b4fd2d8 100644 --- a/kafka-mapper-consumer/docker-compose.yml +++ b/kafka-mapper-consumer/docker-compose.yml @@ -2,10 +2,17 @@ version: '3.9' services: kafka-mapper-consumer: - image: jembi/kafka-mapper-consumer:0.0.3 + image: jembi/kafka-mapper-consumer:0.1.0 environment: KAFKA_HOST: ${KAFKA_HOST} KAFKA_PORT: ${KAFKA_PORT} + KAFKA_FROM_BEGINNING: ${KAFKA_FROM_BEGINNING} + CONSUMER_GROUP_ID: ${CONSUMER_GROUP_ID} + TRUST_SELF_SIGNED: ${TRUST_SELF_SIGNED} + OPENHIM_API_URL: ${OPENHIM_API_URL} + OPENHIM_USERNAME: ${OPENHIM_USERNAME} + OPENHIM_PASSWORD: ${OPENHIM_PASSWORD} + REGISTER_MEDIATOR: ${REGISTER_MEDIATOR} CLICKHOUSE_HOST: ${CLICKHOUSE_HOST} CLICKHOUSE_PORT: ${CLICKHOUSE_PORT} configs: diff --git a/kafka-mapper-consumer/fhir-mapping.json b/kafka-mapper-consumer/fhir-mapping.json index 943ed02f..e15d482f 100644 --- a/kafka-mapper-consumer/fhir-mapping.json +++ b/kafka-mapper-consumer/fhir-mapping.json @@ -3,8 +3,12 @@ "resourceType": "Patient", "tableMappings": [ { - "targetTable": "patient", + "targetTable": "patient_example", "columnMappings": [ + { + "columnName": "goldenId", + "fhirPath": "Patient.link.where(type='refer').other.reference.replace('Patient/', '')" + }, { "columnName": "patientGivenName", "fhirPath": "Patient.name.given" @@ -16,5 +20,23 @@ ] } ] + }, + { + "resourceType": "Observation", + "tableMappings": [ + { + "targetTable": "observation_example", + "columnMappings": [ + { + "columnName": "observationValue", + "fhirPath": "Observation.valueQuantity.value" + }, + { + "columnName": "patientId", + "fhirPath": "Observation.subject.reference.replace('Patient/', '')" + } + ] + } + ] } ] diff --git a/kafka-mapper-consumer/package-metadata.json b/kafka-mapper-consumer/package-metadata.json index 4b6be852..394c7257 100644 --- 
a/kafka-mapper-consumer/package-metadata.json +++ b/kafka-mapper-consumer/package-metadata.json @@ -4,10 +4,17 @@ "description": "A kafka consumer that maps fhir resources to a flattened data structure", "type": "use-case", "version": "0.0.1", - "dependencies": ["message-bus-kafka"], + "dependencies": ["message-bus-kafka", "interoperability-layer-openhim"], "environmentVariables": { "KAFKA_HOST": "kafka-01", "KAFKA_PORT": "9092", + "KAFKA_FROM_BEGINNING": "false", + "CONSUMER_GROUP_ID": "kafka-mapper-consumer", + "TRUST_SELF_SIGNED": "true", + "OPENHIM_API_URL": "https://openhim-core:8080", + "OPENHIM_USERNAME": "root@openhim.org", + "OPENHIM_PASSWORD": "instant101", + "REGISTER_MEDIATOR": "true", "CLICKHOUSE_HOST": "analytics-datastore-clickhouse", "CLICKHOUSE_PORT": "8123" } diff --git a/mpi-mediator/docker-compose.yml b/mpi-mediator/docker-compose.yml index a263eaa1..7d5a295e 100644 --- a/mpi-mediator/docker-compose.yml +++ b/mpi-mediator/docker-compose.yml @@ -2,10 +2,11 @@ version: '3.9' services: mpi-mediator: - image: jembi/mpi-mediator:v1.1.0 + image: jembi/mpi-mediator:v2.1.0 networks: openhim: kafka: + clickhouse: public: default: environment: @@ -20,6 +21,7 @@ services: MPI_CLIENT_ID: ${MPI_CLIENT_ID} MPI_CLIENT_SECRET: ${MPI_CLIENT_SECRET} MPI_AUTH_ENABLED: ${MPI_AUTH_ENABLED} + MPI_PROXY_URL: ${MPI_PROXY_URL} KAFKA_BROKERS: ${KAFKA_HOSTS} KAFKA_BUNDLE_TOPIC: ${KAFKA_BUNDLE_TOPIC} KAFKA_ASYNC_BUNDLE_TOPIC: ${KAFKA_ASYNC_BUNDLE_TOPIC} @@ -28,6 +30,10 @@ services: FHIR_DATASTORE_HOST: ${FHIR_DATASTORE_HOST} FHIR_DATASTORE_PORT: ${FHIR_DATASTORE_PORT} DISABLE_VALIDATION: ${DISABLE_VALIDATION} + ENABLE_JEMPI_GOLDEN_ID_UPDATE: ${ENABLE_JEMPI_GOLDEN_ID_UPDATE} + CLICKHOUSE_HOST: ${CLICKHOUSE_HOST} + CLICKHOUSE_PORT: ${CLICKHOUSE_PORT} + PATIENT_PROFILE_FOR_STUB_PATIENT: ${PATIENT_PROFILE_FOR_STUB_PATIENT} deploy: placement: max_replicas_per_node: 1 @@ -40,6 +46,9 @@ networks: kafka: name: kafka_public external: true + clickhouse: + name: clickhouse_public + 
external: true public: name: mpi_public external: true diff --git a/mpi-mediator/importer/volume/openhim-import.json b/mpi-mediator/importer/volume/openhim-import.json index 142f3dc8..9475b1fa 100644 --- a/mpi-mediator/importer/volume/openhim-import.json +++ b/mpi-mediator/importer/volume/openhim-import.json @@ -1,12 +1,11 @@ { "Users": [ { - "groups": [ - "admin" - ], "firstname": "Super", "surname": "User", "email": "root@openhim.org", + "provider": "token", + "groups": ["admin"], "passwordAlgorithm": "sha512", "passwordHash": "ea3824f17cf1379eb118a36bc7c8cf0f45712e2af7748567fca5313dec6fa66d61064e82a5e5cb88e998486ee3c7d0dac235bbeda8c341d6edc1c77406be2ab6", "passwordSalt": "d4f622c0404f09bd959bfb263efa3452", @@ -18,42 +17,96 @@ ], "Clients": [ { - "roles": [ - "instant" - ], - "customTokenID": "test", "clientID": "test", - "name": "Test Client" + "name": "Test Client", + "roles": ["instant"], + "customTokenID": "test" } ], "Channels": [ { + "name": "JeMPI Patient endpoints in fhir", + "description": "JeMPI Patient endpoints in fhir format", + "urlPattern": "^/fhir/Patient/?[^/]*$", "methods": [ "GET", - "POST" + "POST", + "DELETE", + "PUT", + "OPTIONS", + "HEAD", + "TRACE", + "CONNECT", + "PATCH" ], "type": "http", - "allow": [ - "instant" - ], + "tcpPort": null, + "tcpHost": null, + "pollingSchedule": null, + "requestBody": true, + "responseBody": true, + "allow": ["instant"], "whitelist": [], "authType": "private", + "routes": [ + { + "name": "Generic Mapping Mediator", + "type": "http", + "status": "enabled", + "secured": false, + "host": "openhim-mapping-mediator", + "port": 3003, + "path": "", + "pathTransform": "", + "primary": true, + "username": "", + "password": "", + "forwardAuthHeader": true, + "waitPrimaryResponse": false, + "statusCodesCheck": "2**" + } + ], "matchContentTypes": [], + "matchContentRegex": null, + "matchContentXpath": null, + "matchContentJson": null, + "matchContentValue": null, "properties": [], "txViewAcl": [], "txViewFullAcl": [], 
"txRerunAcl": [], + "alerts": [], "status": "enabled", "rewriteUrls": false, "addAutoRewriteRules": true, + "rewriteUrlsConfig": [], "autoRetryEnabled": false, "autoRetryPeriodMinutes": 60, + "updatedBy": { + "id": "65d47e98223a0672c3c9808b", + "name": "Super User" + } + }, + { + "name": "MPI Orchestrations - Create/Read operations for patients and their clinical data", + "description": "", + "urlPattern": "^/fhir.*$", + "methods": ["GET", "POST"], + "type": "http", + "priority": null, + "tcpPort": null, + "tcpHost": null, + "pollingSchedule": null, + "requestBody": true, + "responseBody": true, + "allow": ["instant"], + "whitelist": [], + "authType": "private", "routes": [ { + "name": "MPI Mediator", "type": "http", "status": "enabled", - "forwardAuthHeader": false, - "name": "MPI Mediator", "secured": false, "host": "mpi-mediator", "port": 3000, @@ -61,59 +114,50 @@ "pathTransform": "", "primary": true, "username": "", - "password": "" + "password": "", + "forwardAuthHeader": false } ], - "requestBody": true, - "responseBody": true, - "rewriteUrlsConfig": [], - "name": "MPI Orchestrations - Create/Read operations for patients and their clinical data", - "urlPattern": "^/fhir.*$", - "priority": null, + "matchContentTypes": [], "matchContentRegex": null, "matchContentXpath": null, - "matchContentValue": null, "matchContentJson": null, - "pollingSchedule": null, - "tcpHost": null, - "tcpPort": null, - "updatedBy": { - "id": "63cd5092c962240014b87c59", - "name": "Super User" - }, - "alerts": [], - "description": "" - }, - { - "methods": [ - "POST" - ], - "type": "http", - "allow": [ - "instant" - ], - "whitelist": [], - "authType": "private", - "matchContentTypes": [], + "matchContentValue": null, "properties": [], "txViewAcl": [], "txViewFullAcl": [], "txRerunAcl": [], + "alerts": [], "status": "enabled", "rewriteUrls": false, "addAutoRewriteRules": true, + "rewriteUrlsConfig": [], "autoRetryEnabled": false, "autoRetryPeriodMinutes": 60, "updatedBy": { - "id": 
"63cd5092c962240014b87c59", + "id": "65d47e98223a0672c3c9808b", "name": "Super User" - }, + } + }, + { + "name": "MPI Orchestration for fhir bundles - Asynchronous flow", + "urlPattern": "^/async/fhir/?$", + "methods": ["POST"], + "type": "http", + "priority": null, + "tcpPort": null, + "tcpHost": null, + "pollingSchedule": null, + "requestBody": true, + "responseBody": true, + "allow": ["instant"], + "whitelist": [], + "authType": "private", "routes": [ { + "name": "MPI Mediator", "type": "http", "status": "enabled", - "forwardAuthHeader": false, - "name": "MPI Mediator", "secured": false, "host": "mpi-mediator", "port": 3000, @@ -121,85 +165,107 @@ "pathTransform": "", "primary": true, "username": "", - "password": "" + "password": "", + "forwardAuthHeader": false } ], - "requestBody": true, - "responseBody": true, - "rewriteUrlsConfig": [], - "urlPattern": "^/async/fhir/?$", - "priority": null, + "matchContentTypes": [], "matchContentRegex": null, "matchContentXpath": null, - "matchContentValue": null, "matchContentJson": null, - "pollingSchedule": null, - "tcpHost": null, - "tcpPort": null, + "matchContentValue": null, + "properties": [], + "txViewAcl": [], + "txViewFullAcl": [], + "txRerunAcl": [], "alerts": [], - "name": "MPI Orchestration for fhir bundles - Asynchronous flow" + "status": "enabled", + "rewriteUrls": false, + "addAutoRewriteRules": true, + "rewriteUrlsConfig": [], + "autoRetryEnabled": false, + "autoRetryPeriodMinutes": 60, + "updatedBy": { + "id": "65d47e98223a0672c3c9808b", + "name": "Super User" + } } ], "Mediators": [ + { + "urn": "urn:mediator:generic_mapper", + "version": "3.2.0", + "name": "Mapping Mediator", + "description": "Generic OpenHIM Mapping Mediator", + "endpoints": [ + { + "name": "Generic Mapping Mediator", + "type": "http", + "status": "enabled", + "host": "localhost", + "port": 3003, + "forwardAuthHeader": false + } + ], + "defaultChannelConfig": [], + "configDefs": [], + "_lastHeartbeat": "2024-02-22T16:09:57.075Z", + 
"_uptime": 101055.747901311 + }, { "urn": "urn:mediator:mpi-mediator", "version": "1.0.0", "name": "MPI mediator", "description": "A mediator handling interactions between the OpenHIM Core service, Sante MPI, Hapi-FHIR, and Kafka", - "defaultChannelConfig": [ + "endpoints": [ { - "methods": [ - "POST", - "GET" - ], + "name": "MPI Endpoint", "type": "http", - "allow": [ - "instant" - ], - "whitelist": [], - "authType": "private", - "matchContentTypes": [], - "properties": [], - "txViewAcl": [], - "txViewFullAcl": [], - "txRerunAcl": [], "status": "enabled", - "rewriteUrls": false, - "addAutoRewriteRules": true, - "autoRetryEnabled": false, - "autoRetryPeriodMinutes": 60, + "host": "mpi-mediator", + "port": 3000, + "path": "/fhir", + "primary": true, + "forwardAuthHeader": false + } + ], + "defaultChannelConfig": [ + { "name": "MPI mediator", "urlPattern": "^(/async)?/fhir.*$", + "methods": ["POST", "GET"], + "type": "http", + "allow": ["instant"], + "whitelist": [], + "authType": "private", "routes": [ { + "name": "MPI Endpoint", "type": "http", "status": "enabled", - "forwardAuthHeader": false, - "name": "MPI Endpoint", "host": "mpi-mediator", "port": 3000, - "primary": true + "primary": true, + "forwardAuthHeader": false } ], + "matchContentTypes": [], + "properties": [], + "txViewAcl": [], + "txViewFullAcl": [], + "txRerunAcl": [], "alerts": [], - "rewriteUrlsConfig": [] - } - ], - "endpoints": [ - { - "type": "http", "status": "enabled", - "forwardAuthHeader": false, - "name": "MPI Endpoint", - "host": "mpi-mediator", - "path": "/fhir", - "port": 3000, - "primary": true + "rewriteUrls": false, + "addAutoRewriteRules": true, + "rewriteUrlsConfig": [], + "autoRetryEnabled": false, + "autoRetryPeriodMinutes": 60 } ], "configDefs": [], - "_lastHeartbeat": "2023-01-23T09:40:21.368Z", - "_uptime": 52784.4956787 + "_lastHeartbeat": "2024-02-22T16:09:55.511Z", + "_uptime": 4490.494161057 } ], "ContactGroups": [] diff --git a/mpi-mediator/package-metadata.json 
b/mpi-mediator/package-metadata.json index 2ded100e..85c1f07e 100644 --- a/mpi-mediator/package-metadata.json +++ b/mpi-mediator/package-metadata.json @@ -4,15 +4,9 @@ "description": "This package creates a mediator service that is used in facilitating patient creation, updating and reading from the MPI and Hapi-Fhir, from FHIR bundles using Kafka", "type": "infrastructure", "version": "0.0.1", - "dependencies": [ - "interoperability-layer-openhim", - "fhir-datastore-hapi-fhir", - "message-bus-kafka", - "client-registry-santempi" - ], + "dependencies": ["fhir-datastore-hapi-fhir", "client-registry-jempi"], "environmentVariables": { "MPI_MEDIATOR_INSTANCES": 1, - "CLIENT_REGISTRY_URL": "http://santedb-mpi:8080", "OPENHIM_MEDIATOR_URL": "https://openhim-core:8080", "TRUST_SELF_SIGNED": "true", "OPENHIM_USERNAME": "root@openhim.org", @@ -21,9 +15,10 @@ "MPI_CLIENT_ID": "fiddler", "MPI_CLIENT_SECRET": "fiddler", "MPI_PROTOCOL": "http", - "MPI_HOST": "santedb-mpi", - "MPI_PORT": 8080, - "MPI_AUTH_ENABLED": "true", + "MPI_HOST": "openhim-mapping-mediator", + "MPI_PORT": 3003, + "MPI_AUTH_ENABLED": "false", + "MPI_PROXY_URL": "http://localhost:5001", "KAFKA_HOSTS": "kafka-01:9092", "KAFKA_BUNDLE_TOPIC": "2xx", "KAFKA_ASYNC_BUNDLE_TOPIC": "2xx-async", @@ -31,6 +26,10 @@ "FHIR_DATASTORE_PROTOCOL": "http", "FHIR_DATASTORE_HOST": "hapi-fhir", "FHIR_DATASTORE_PORT": 8080, - "DISABLE_VALIDATION": "false" + "DISABLE_VALIDATION": "true", + "ENABLE_JEMPI_GOLDEN_ID_UPDATE": "true", + "CLICKHOUSE_HOST": "analytics-datastore-clickhouse", + "CLICKHOUSE_PORT": "8123", + "PATIENT_PROFILE_FOR_STUB_PATIENT": "" } } diff --git a/mpi.env b/mpi.env new file mode 100644 index 00000000..d0b90c29 --- /dev/null +++ b/mpi.env @@ -0,0 +1,19 @@ +# General +CLUSTERED_MODE=false + +# Log +DEBUG=0 +BASHLOG_FILE=0 +BASHLOG_FILE_PATH=platform.log + +# Message Bus - Kafka +# !NOTE: Topics should comma seperated, optional include partion and repliction values +# e.g. 
:: -> test:3:2 (defaults to :3:1) +KAFKA_TOPICS=2xx,reprocess,3xx + +# SSO +KC_OPENHIM_SSO_ENABLED=true +OPENHIM_CONSOLE_SHOW_LOGIN=false +KC_JEMPI_SSO_ENABLED=true +REACT_APP_JEMPI_BASE_API_PORT=50001 +KC_GRAFANA_SSO_ENABLED=true diff --git a/openhim-mapping-mediator/docker-compose.yml b/openhim-mapping-mediator/docker-compose.yml index 9a393d29..cdfdd87b 100644 --- a/openhim-mapping-mediator/docker-compose.yml +++ b/openhim-mapping-mediator/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.9' services: openhim-mapping-mediator: - image: jembi/openhim-mediator-mapping:v3.2.0 + image: jembi/openhim-mediator-mapping:v3.3.0 environment: OPENHIM_REGISTER: ${OPENHIM_REGISTER} MONGO_URL: ${OPENHIM_MONGO_URL} @@ -17,6 +17,7 @@ services: mongo: kafka: public: + hapi: networks: openhim: @@ -31,3 +32,6 @@ networks: public: name: openhim_mapping_mediator_public external: true + hapi: + name: hapi-fhir_public + external: true diff --git a/purge-local.sh b/purge-local.sh index b06018b7..7a6ed528 100755 --- a/purge-local.sh +++ b/purge-local.sh @@ -2,5 +2,6 @@ docker service rm $(docker service ls -q) docker rm -f $(docker ps -aq) -docker volume prune -f +docker volume prune -af docker config rm $(docker config ls -q) +docker network prune -f diff --git a/test/cucumber/features/cluster-mode/jempi.cluster.feature b/test/cucumber/features/cluster-mode/jempi.cluster.feature index e1a3bf95..69dbb7a2 100644 --- a/test/cucumber/features/cluster-mode/jempi.cluster.feature +++ b/test/cucumber/features/cluster-mode/jempi.cluster.feature @@ -28,11 +28,8 @@ Feature: Client Registry JeMPI? 
And The service "jempi-async-receiver" should be started with 1 replica And The service "jempi-async-receiver" should be connected to the networks | kafka_public | jempi_default | - And The service "jempi-sync-receiver" should be started with 1 replica - And The service "jempi-sync-receiver" should be connected to the networks - | kafka_public | jempi_default | - And The service "jempi-pre-processor" should be started with 1 replica - And The service "jempi-pre-processor" should be connected to the networks + And The service "jempi-etl" should be started with 1 replica + And The service "jempi-etl" should be connected to the networks | kafka_public | jempi_default | And The service "jempi-controller" should be started with 1 replica And The service "jempi-controller" should be connected to the networks @@ -72,8 +69,7 @@ Feature: Client Registry JeMPI? And The service "jempi-alpha-02" should be removed And The service "jempi-alpha-03" should be removed And The service "jempi-async-receiver" should be removed - And The service "jempi-sync-receiver" should be removed - And The service "jempi-pre-processor" should be removed + And The service "jempi-etl" should be removed And The service "jempi-controller" should be removed And The service "jempi-em-calculator" should be removed And The service "jempi-linker" should be removed diff --git a/test/cucumber/features/single-mode/jempi.feature b/test/cucumber/features/single-mode/jempi.feature index d4fdfa8a..a591f803 100644 --- a/test/cucumber/features/single-mode/jempi.feature +++ b/test/cucumber/features/single-mode/jempi.feature @@ -20,11 +20,8 @@ Feature: Client Registry JeMPI? 
And The service "jempi-async-receiver" should be started with 1 replica And The service "jempi-async-receiver" should be connected to the networks | kafka_public | jempi_default | - And The service "jempi-sync-receiver" should be started with 1 replica - And The service "jempi-sync-receiver" should be connected to the networks - | kafka_public | jempi_default | - And The service "jempi-pre-processor" should be started with 1 replica - And The service "jempi-pre-processor" should be connected to the networks + And The service "jempi-etl" should be started with 1 replica + And The service "jempi-etl" should be connected to the networks | kafka_public | jempi_default | And The service "jempi-controller" should be started with 1 replica And The service "jempi-controller" should be connected to the networks @@ -58,8 +55,7 @@ Feature: Client Registry JeMPI? And The service "jempi-alpha-02" should be removed And The service "jempi-alpha-03" should be removed And The service "jempi-async-receiver" should be removed - And The service "jempi-sync-receiver" should be removed - And The service "jempi-pre-processor" should be removed + And The service "jempi-etl" should be removed And The service "jempi-controller" should be removed And The service "jempi-em-calculator" should be removed And The service "jempi-linker" should be removed diff --git a/utils/config-utils.sh b/utils/config-utils.sh deleted file mode 100755 index 01a6156c..00000000 --- a/utils/config-utils.sh +++ /dev/null @@ -1,433 +0,0 @@ -#!/bin/bash -# -# Library name: config -# This is a library that contains functions to assist with docker configs -# -# For functions using `declare -n`, note the following explanation https://linuxhint.com/bash_declare_command/#:~:text=giving%20them%20attributes.-,Namerefs,-If%20you%20are - -. 
"$(pwd)/utils/log.sh" - -# Sets the digest variables for the conf raft files in the provided docker compose file -# -# Requirements: -# - All configs must have a file and name property -# - The name property must end in -${DIGEST_VAR_NAME:?err} (eg. name: my-file-${MY_FILE_DIGEST:?err}) -# -# Arguments: -# - $1 : docker compose directory path (eg. /home/user/project/docker-compose.yml) -# -# Exports: -# As many digest environment variables as are declared in the provided docker compose file -# -config::set_config_digests() { - local -r DOCKER_COMPOSE_PATH="${1:?$(missing_param "set_config_digests")}" - - # Get configs files and names from yml file - local -r files=($(yq '.configs."*.*".file' "${DOCKER_COMPOSE_PATH}")) - local -r names=($(yq '.configs."*.*".name' "${DOCKER_COMPOSE_PATH}")) - local -r compose_folder_path="${DOCKER_COMPOSE_PATH%/*}" - - if [[ "${files[*]}" != *"null"* ]] && [[ "${names[*]}" != *"null"* ]]; then - log info "Setting config digests" - - for ((i = 0; i < ${#files[@]}; i++)); do - file=${files[$i]} - name=${names[$i]} - - file_name="${compose_folder_path}${file//\.\///}" # TODO: Throw an error if the file name is too long to allow for a unique enough digest - env_var_name=$(echo "${name}" | grep -P -o "{.*:?err}" | sed 's/[{}]//g' | sed 's/:?err//g') - - if [[ -n "$env_var_name" ]]; then - # generate and truncate the digest to conform to the 64 character restriction on docker config names - env_declaration_characters=":?err" # '${:?err}' from setting an env variable - remainder=$((64 - (${#name} - ${#env_var_name} - ${#env_declaration_characters}))) - export "${env_var_name}"="$(cksum "${file_name}" | awk '{print $1}' | cut -c -${remainder})" - fi - done - elif [[ "${files[*]}" == *"null"* ]]; then - log error "No files found to set the digest in:\n $DOCKER_COMPOSE_PATH" - exit 1 - else - log error "You should specify names for the files in:\n $DOCKER_COMPOSE_PATH" - exit 1 - fi -} - -# Removes stale docker configs based on the provided 
docker-compose file -# -# Requirements: -# - All configs must have a file and name property -# - The name property must end in -${DIGEST_VAR_NAME:?err} (eg. name: my-file-${MY_FILE_DIGEST:?err}) -# -# Arguments: -# - $1 : docker compose directory path (eg. /home/user/project/docker-compose.yml) -# - $2 : config label (eg. logstash) -# -config::remove_stale_service_configs() { - local -r DOCKER_COMPOSE_PATH="${1:?$(missing_param "remove_stale_service_configs" "DOCKER_COMPOSE_PATH")}" - local -r CONFIG_LABEL="${2:?$(missing_param "remove_stale_service_configs" "CONFIG_LABEL")}" - - local -r compose_names=($(yq '.configs."*.*".name' "${DOCKER_COMPOSE_PATH}")) - local configs_to_remove=() - - if [[ "${compose_names[*]}" != "null" ]]; then - for compose_name in "${compose_names[@]}"; do - compose_name_without_env=$(echo "${compose_name}" | sed 's/-\${.*//g') - - compose_name_occurences=$(for word in "${compose_names[@]}"; do echo "${word}"; done | grep -c "${compose_name_without_env}") - if [[ $compose_name_occurences -gt "1" ]]; then - log warn "Warning: Duplicate config name (${compose_name_without_env}) was found in ${DOCKER_COMPOSE_PATH}" - fi - - raft_ids=($(docker config ls -f "label=name=${CONFIG_LABEL}" -f "name=${compose_name_without_env}" --format "{{.ID}}")) - # Only keep the most recent of all configs with the same name - if [[ ${#raft_ids[@]} -gt 1 ]]; then - most_recent_raft_id="${raft_ids[0]}" - for ((i = 1; i < ${#raft_ids[@]}; i++)); do - raft_id=${raft_ids[$i]} - most_recent_raft_created_date=$(docker config inspect -f "{{.CreatedAt}}" "${most_recent_raft_id}") - raft_created_date=$(docker config inspect -f "{{.CreatedAt}}" "${raft_id}") - if [[ $raft_created_date > $most_recent_raft_created_date ]]; then - configs_to_remove+=("${most_recent_raft_id}") - most_recent_raft_id="${raft_id}" - else - configs_to_remove+=("${raft_id}") - fi - done - fi - done - else - log warn "No name files found in the compose config to be removed" - fi - - if [[ 
"${#configs_to_remove[@]}" -gt 0 ]]; then - try \ - "docker config rm ${configs_to_remove[*]}" \ - catch \ - "Failed to remove configs: ${configs_to_remove[*]}" - fi -} - -# A function that exists in a loop to see how long that loop has run for, providing a warning -# at the time specified in argument $3, and exits with code 124 after the time specified in argument $4. -# -# Arguments: -# - $1 : start time of the timeout check -# - $2 : a message containing reference to the loop that timed out -# - $3 : timeout time in seconds, default is 300 seconds -# - $4 : elapsed time to issue running-for-longer-than-expected warning (in seconds), default is 60 seconds -# -config::timeout_check() { - local start_time=$(($1)) - local message=$2 - local exit_time="${3:-300}" - local warning_time="${4:-60}" - - local timeDiff=$(($(date +%s) - $start_time)) - if [[ $timeDiff -ge $warning_time ]] && [[ $timeDiff -lt $(($warning_time + 1)) ]]; then - log warn "Warning: Waited $warning_time seconds for $message. This is taking longer than it should..." - elif [[ $timeDiff -ge $exit_time ]]; then - log error "Fatal: Waited $exit_time seconds for $message. Exiting..." - exit 124 - fi -} - -# A generic function confirming whether or not a containerized api is reachable -# -# Requirements: -# - The function attempts to start up a helper container using the jembi/await-helper image. It is therefore necessary -# to specify the docker-compose file to deploy the await-helper container which the await_service_running function -# relies on. Details on configuring the await-helper can be found at https://github.com/jembi/platform-await-helper. -# -# Arguments: -# - $1 : the service being awaited -# - $2 : path to await-helper compose.yml file (eg. ~/projects/platform/dashboard-visualiser-jsreport/docker-compose.await-helper.yml) -# - $3 : desired number of instances of the awaited-service -# - $4 : stack name that the service falls under (eg. 
openhim) -# - $5 : (optional) the max time allowed to wait for a service's response, defaults to 300 seconds -# - $6 : (optional) elapsed time to throw a warning, defaults to 60 seconds -# -config::await_service_running() { - local -r SERVICE_NAME="${1:?$(missing_param "await_service_running" "SERVICE_NAME")}" - local -r AWAIT_HELPER_FILE_PATH="${2:?$(missing_param "await_service_running" "AWAIT_HELPER_FILE_PATH")}" - local -r SERVICE_INSTANCES="${3:?$(missing_param "await_service_running" "SERVICE_INSTANCES")}" - local -r STACK_NAME="${4:?$(missing_param "await_service_running" "STACK_NAME")}" - local -r exit_time="${5:-}" - local -r warning_time="${6:-}" - local start_time - start_time=$(date +%s) - - docker service rm "$STACK_NAME"_await-helper &>/dev/null - - try "docker stack deploy -c $AWAIT_HELPER_FILE_PATH $STACK_NAME" throw "Failed to deploy await helper" - until [[ $(docker service ls -f name="$STACK_NAME"_"$SERVICE_NAME" --format "{{.Replicas}}") == *"$SERVICE_INSTANCES/$SERVICE_INSTANCES"* ]]; do - config::timeout_check "$start_time" "$SERVICE_NAME to start" "$exit_time" "$warning_time" - sleep 1 - done - - start_time=$(date +%s) # Reintialize for the second loop - local await_helper_state - await_helper_state=$(docker service ps "$STACK_NAME"_await-helper --format "{{.CurrentState}}") - until [[ $await_helper_state == *"Complete"* ]]; do - config::timeout_check "$start_time" "$SERVICE_NAME status check" "$exit_time" "$warning_time" - sleep 1 - - await_helper_state=$(docker service ps "$STACK_NAME"_await-helper --format "{{.CurrentState}}") - if [[ $await_helper_state == *"Failed"* ]] || [[ $await_helper_state == *"Rejected"* ]]; then - log error "Fatal: Received error when trying to verify state of $SERVICE_NAME. 
Error: - $(docker service ps "$STACK_NAME"_await-helper --no-trunc --format '{{.Error}}')" - exit 1 - fi - done - - try "docker service rm "$STACK_NAME"_await-helper" catch "Failed to remove await-helper" -} - -# A function which removes a config importing service on successful completion, and exits with an error otherwise -# -# Arguments: -# - $1 : stack name that the service falls under (eg. openhim) -# - $2 : the name of the config importer -# - $3 : (optional) the timeout time for the config importer to run, defaults to 300 seconds -# - $4 : (optional) elapsed time to throw a warning, defaults to 60 seconds -# -config::remove_config_importer() { - local -r STACK_NAME="${1:?$(missing_param "remove_config_importer" "STACK_NAME")}" - local -r CONFIG_IMPORTER_SERVICE_NAME="${2:?$(missing_param "remove_config_importer" "CONFIG_IMPORTER_SERVICE_NAME")}" - local -r exit_time="${3:-}" - local -r warning_time="${4:-}" - local -r start_time=$(date +%s) - - local config_importer_state - - if [[ -z $(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME") ]]; then - log info "${STACK_NAME}_$CONFIG_IMPORTER_SERVICE_NAME service cannot be removed as it does not exist!" 
- exit 0 - fi - - config_importer_state=$(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME" --format "{{.CurrentState}}") - until [[ $config_importer_state == *"Complete"* ]]; do - config::timeout_check "$start_time" "$CONFIG_IMPORTER_SERVICE_NAME to run" "$exit_time" "$warning_time" - sleep 1 - - config_importer_state=$(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME" --format "{{.CurrentState}}") - if [[ $config_importer_state == *"Failed"* ]] || [[ $config_importer_state == *"Rejected"* ]]; then - log error "Fatal: $CONFIG_IMPORTER_SERVICE_NAME failed with error: - $(docker service ps ${STACK_NAME}_"$CONFIG_IMPORTER_SERVICE_NAME" --no-trunc --format '{{.Error}}')" - exit 1 - fi - done - - try "docker service rm "$STACK_NAME"_$CONFIG_IMPORTER_SERVICE_NAME" catch "Failed to remove config importer" -} - -# Waits for the provided service to be removed -# -# Arguments: -# - $1 : stack name that the service falls under (eg. openhim) -# - $2 : service name (eg. analytics-datastore-elastic-search) -# -config::await_service_removed() { - local -r STACK_NAME="${1:?$(missing_param "await_service_removed", "STACK_NAME")}" - local -r SERVICE_NAME="${2:?$(missing_param "await_service_removed", "SERVICE_NAME")}" - local start_time=$(date +%s) - - until [[ -z $(docker stack ps $STACK_NAME -qf name="${STACK_NAME}_${SERVICE_NAME}" 2>/dev/null) ]]; do - config::timeout_check "$start_time" "${SERVICE_NAME} to be removed" - sleep 1 - done - log info "Service $SERVICE_NAME successfully removed" -} - -# Generates configs for a service from a folder and adds them to a temp docker-compose file -# -# Arguments: -# - $1 : service name (eg. data-mapper-logstash) -# - $2 : target base (eg. /usr/share/logstash/) -# - $3 : target folder path in absolute format (eg. "$PATH_TO_FILE"/pipeline) -# - $4 : compose file path (eg. 
"$PATH_TO_FILE") -# -# Exports: -# All exports are required for yq to process the values and are not intended for external use -# - service_config_query -# - config_target -# - config_source -# - config_query -# - config_file -# - config_label_name -# - config_service_name -# -config::generate_service_configs() { - local -r SERVICE_NAME=${1:?$(missing_param "generate_service_configs" "SERVICE_NAME")} - local -r TARGET_BASE=${2:?$(missing_param "generate_service_configs" "TARGET_BASE")} - local -r TARGET_FOLDER_PATH=${3:?$(missing_param "generate_service_configs" "TARGET_FOLDER_PATH")} - local -r COMPOSE_PATH=${4:?$(missing_param "generate_service_configs" "COMPOSE_PATH")} - local -r LABEL_NAME=${5:?$(missing_param "generate_service_configs" "LABEL_NAME")} - local -r TARGET_FOLDER_NAME=$(basename "${TARGET_FOLDER_PATH}") - local count=0 - - try \ - "touch ${COMPOSE_PATH}/docker-compose.tmp.yml" \ - throw \ - "Failed to create temp service config compose file" - - find "${TARGET_FOLDER_PATH}" -maxdepth 10 -mindepth 1 -type f | while read -r file; do - file_name=${file/"${TARGET_FOLDER_PATH%/}"/} - file_name=${file_name:1} - file_hash=$(cksum "${file}" | awk '{print $1}') - - # for these variables to be visible by yq they need to be exported - export service_config_query=".services.${SERVICE_NAME}.configs[${count}]" - export config_target="${TARGET_BASE%/}/${TARGET_FOLDER_NAME}/${file_name}" - export config_source="${SERVICE_NAME}-${file_hash}" - - export config_query=".configs.${config_source}" - export config_file="./${TARGET_FOLDER_NAME}/${file_name}" - export config_label_name=$LABEL_NAME - export config_service_name=$SERVICE_NAME - - yq -i ' - .version = "3.9" | - eval(strenv(service_config_query)).target = env(config_target) | - eval(strenv(service_config_query)).source = strenv(config_source) | - eval(strenv(config_query)).file = strenv(config_file) | - eval(strenv(config_query)).name = strenv(config_source) | - eval(strenv(config_query)).labels.name = 
strenv(config_label_name) | - eval(strenv(config_query)).labels.service = strenv(config_service_name) - ' "${COMPOSE_PATH}/docker-compose.tmp.yml" - - count=$((count + 1)) - done -} - -# Replaces all environment variables in a file with the environment variable value -# -# Arguments: -# - $1 : the path to the file that you wish to substitute env vars into (eg. "${COMPOSE_FILE_PATH}"/config.ini) -# -config::substitute_env_vars() { - local -r FILE_PATH="${1:?$(missing_param "substitute_env_vars")}" - config_with_env=$(envsubst <"${FILE_PATH}") - echo "" >"${FILE_PATH}" - echo "$config_with_env" >>"${FILE_PATH}" -} - -# Modify a variable to contain the necessary `--config-rm` and `--config-add` arguments to update a service's -# configs based off newly created docker configs for a provided folder. The modified variable must then be -# used in a `docker service update` command, like follows: -# ``` -# service_update_args="" -# config::update_service_configs service_update_args /usr/share/logstash/ "$PATH_TO_FILE"/pipeline cares -# docker service update $service_update_args instant_data-mapper-logstash -# ``` -# Reference arguments: -# - $1 : config update variable name (eg. service_update_args) -# -# Arguments: -# - $2 : target base (eg. /usr/share/logstash/) -# - $3 : target folder path in absolute format (eg. "$PATH_TO_FILE"/pipeline) -# - $4 : config label name (eg. 
cares) -# -config::update_service_configs() { - declare -n REF_config_update_var="${1:?$(missing_param "update_service_configs" "REF_config_update_var")}" - local -r TARGET_BASE=${2:?$(missing_param "update_service_configs" "TARGET_BASE")} - local -r TARGET_FOLDER_PATH=${3:?$(missing_param "update_service_configs" "TARGET_FOLDER_PATH")} - local -r CONFIG_LABEL_NAME="${4:?$(missing_param "update_service_configs" "CONFIG_LABEL_NAME")}" - local config_rm_string="" - local config_add_string="" - - files=$(find "${TARGET_FOLDER_PATH}" -maxdepth 10 -mindepth 1 -type f) - - for file in $files; do - file_name=${file/"${TARGET_FOLDER_PATH%/}"/} - file_name=${file_name:1} - file_hash=$(md5sum "${file}" | awk '{print $1}') - config_file="${TARGET_FOLDER_PATH}/${file_name}" - config_target="${TARGET_BASE%/}/${file_name}" - config_name=$(basename "$file_name")-$file_hash - old_config_name=$(docker config inspect --format="{{.Spec.Name}}" "$(docker config ls -qf name="$(basename "$file_name")")" 2>/dev/null) - - if [[ "$config_name" != "$old_config_name" ]]; then - if [[ -n $old_config_name ]]; then - config_rm_string+="--config-rm $old_config_name " - fi - config_add_string+="--config-add source=$config_name,target=$config_target " - - try \ - "docker config create --label name=$CONFIG_LABEL_NAME $config_name $config_file" \ - catch \ - "Failed to create config" - fi - done - - REF_config_update_var+="$config_rm_string $config_add_string" -} - -# Modify a variable to contain the necessary `--env-add` arguments to update a service's -# environment specified in a .env file. The modified variable must then be -# used in a `docker service update` command, like follows: -# ``` -# service_update_args="" -# config::env_var_add_from_file service_update_args "$PATH_TO_FILE"/.env.add -# docker service update $service_update_args instant_data-mapper-logstash -# ``` -# Reference arguments: -# - $1 : service update variable name (eg. 
service_update_args) -# -# Arguments: -# - $2 : .env file (eg. "$PATH_TO_FILE"/.env.add) -# -config::env_var_add_from_file() { - declare -n REF_service_update_var="${1:?$(missing_param "env_var_add_from_file" "REF_service_update_var")}" - local -r ENV_FILE=${2:?$(missing_param "env_var_add_from_file" "ENV_FILE")} - - if [[ ! -f $ENV_FILE ]]; then - log error "$ENV_FILE: No such file or directory. Exiting..." - return 1 - fi - - readarray -t env_vars <"$ENV_FILE" - for env_var in "${env_vars[@]}"; do - REF_service_update_var+=" --env-add $env_var" - done -} - -# Modify a variable to contain the necessary `--env-add` arguments to update a service's -# environment based on the provided env var. The modified variable must then be -# used in a `docker service update` command, like follows: -# ``` -# service_update_args="" -# config::env_var_add service_update_args MY_ENV_VAR=my_value -# docker service update $service_update_args instant_data-mapper-logstash -# ``` -# Reference arguments: -# - $1 : service update variable name (eg. service_update_args) -# -# Arguments: -# - $2 : env var (eg. MY_ENV_VAR=my_value) -# -config::env_var_add() { - declare -n REF_service_update_var="${1:?$(missing_param "env_var_add" "REF_service_update_var")}" - local -r ENV_VAR=${2:?$(missing_param "env_var_add" "ENV_VAR")} - - REF_service_update_var+=" --env-add $ENV_VAR" -} - -# Waits for the provided service to be reachable by checking logs -# -# Arguments: -# $1 : service name (eg. analytics-datastore-elastic-search) -# $2 : stack name that the service falls under (eg. openhim) -# $3 : log string to be checked (eg. 
Starting) -# -config::await_service_reachable() { - local -r SERVICE_NAME=${1:?$(missing_param "await_service_reachable" "SERVICE_NAME")} - local -r STACK_NAME=${2:?$(missing_param "await_service_reachable" "STACK_NAME")} - local -r LOG_MESSAGE=${3:?$(missing_param "await_service_reachable" "LOG_MESSAGE")} - local -r start_time=$(date +%s) - - until [[ $(docker service logs --tail all "${STACK_NAME}"_"${SERVICE_NAME}" 2>/dev/null | grep -c "${LOG_MESSAGE}") -gt 0 ]]; do - config::timeout_check "$start_time" "${STACK_NAME}_$SERVICE_NAME to be reachable" - sleep 1 - done -} diff --git a/utils/docker-utils.sh b/utils/docker-utils.sh deleted file mode 100644 index 6c960bbf..00000000 --- a/utils/docker-utils.sh +++ /dev/null @@ -1,574 +0,0 @@ -#!/bin/bash -# -# Library name: docker -# This is a library that contains functions to assist with docker actions - -. "$(pwd)/utils/config-utils.sh" -. "$(pwd)/utils/log.sh" - -# Gets current status of the provided service -# -# Arguments: -# - $1 : service name (eg. analytics-datastore-elastic-search) -# -docker::get_current_service_status() { - local -r SERVICE_NAME=${1:?$(missing_param "get_current_service_status")} - docker service ps "${SERVICE_NAME}" --format "{{.CurrentState}}" 2>/dev/null -} - -# Gets unique errors from the provided service -# -# Arguments: -# - $1 : service name (eg. analytics-datastore-elastic-search) -# -docker::get_service_unique_errors() { - local -r SERVICE_NAME=${1:?$(missing_param "get_service_unique_errors")} - - # Get unique error messages using sort -u - docker service ps "${SERVICE_NAME}" --no-trunc --format '{{ .Error }}' 2>&1 | sort -u -} - -# Waits for a container to be up -# -# Arguments: -# - $1 : stack name that the service falls under (eg. elastic) -# - $2 : service name (eg. 
analytics-datastore-elastic-search) -# -docker::await_container_startup() { - local -r STACK_NAME=${1:?$(missing_param "await_container_startup", "STACK_NAME")} - local -r SERVICE_NAME=${2:?$(missing_param "await_container_startup", "SERVICE_NAME")} - - log info "Waiting for ${SERVICE_NAME} to start up..." - local start_time - start_time=$(date +%s) - until [[ -n $(docker service ls -qf name="${STACK_NAME}"_"${SERVICE_NAME}") ]]; do - config::timeout_check "${start_time}" "${SERVICE_NAME} to start" - sleep 1 - done - overwrite "Waiting for ${SERVICE_NAME} to start up... Done" -} - -# Waits for a container to be up -# -# Arguments: -# - $1 : stack name that the service falls under (eg. elastic) -# - $2 : service name (eg. analytics-datastore-elastic-search) -# - $3 : service status (eg. running) -# -docker::await_service_status() { - local -r STACK_NAME=${1:?$(missing_param "await_service_status" "STACK_NAME")} - local -r SERVICE_NAME=${2:?$(missing_param "await_service_status" "SERVICE_NAME")} - local -r SERVICE_STATUS=${3:?$(missing_param "await_service_status" "SERVICE_STATUS")} - local -r start_time=$(date +%s) - local error_message=() - - log info "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be ${SERVICE_STATUS}..." 
- until [[ $(docker::get_current_service_status ${STACK_NAME}_${SERVICE_NAME}) == *"${SERVICE_STATUS}"* ]]; do - config::timeout_check "${start_time}" "${STACK_NAME}_${SERVICE_NAME} to start" - sleep 1 - - # Get unique error messages using sort -u - new_error_message=($(docker::get_service_unique_errors ${STACK_NAME}_$SERVICE_NAME)) - if [[ -n ${new_error_message[*]} ]]; then - # To prevent logging the same error - if [[ "${error_message[*]}" != "${new_error_message[*]}" ]]; then - error_message=(${new_error_message[*]}) - log error "Deploy error in service ${STACK_NAME}_$SERVICE_NAME: ${error_message[*]}" - fi - - # To exit in case the error is not having the image - if [[ "${new_error_message[*]}" == *"No such image"* ]]; then - log error "Do you have access to pull the image?" - exit 124 - fi - fi - done - overwrite "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be ${SERVICE_STATUS}... Done" -} - -# Waits for a container to be destroyed -# -# Arguments: -# - $1 : stack name that the service container falls under (eg. elastic) -# - $2 : service name (eg. analytics-datastore-elastic-search) -# -docker::await_container_destroy() { - local -r STACK_NAME=${1:?$(missing_param "await_container_destroy", "STACK_NAME")} - local -r SERVICE_NAME=${2:?$(missing_param "await_container_destroy", "SERVICE_NAME")} - - log info "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be destroyed..." - local start_time - start_time=$(date +%s) - until [[ -z $(docker ps -qlf name="${STACK_NAME}_${SERVICE_NAME}") ]]; do - config::timeout_check "${start_time}" "${SERVICE_NAME} to be destroyed" - sleep 1 - done - overwrite "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be destroyed... Done" -} - -# Waits for a service to be destroyed -# -# Arguments: -# - $1 : service name (eg. analytics-datastore-elastic-search) -# - $2 : stack name that the service falls under (eg. 
elastic) -# -docker::await_service_destroy() { - local -r SERVICE_NAME=${1:?$(missing_param "await_service_destroy", "SERVICE_NAME")} - local -r STACK_NAME=${2:?$(missing_param "await_service_destroy", "STACK_NAME")} - local start_time - start_time=$(date +%s) - - while docker service ls | grep -q "\s${STACK_NAME}_${SERVICE_NAME}\s"; do - config::timeout_check "${start_time}" "${SERVICE_NAME} to be destroyed" - sleep 1 - done -} - -# Removes services containers then the service itself -# This was created to aid in removing volumes, -# since volumes being removed were still attached to some lingering containers after container remove -# -# NB: Global services can't be scale down -# -# Arguments: -# - $1 : stack name that the services fall under (eg. elasticsearch) -# - $@ : service names list (eg. analytics-datastore-elastic-search) -# -docker::service_destroy() { - local -r STACK_NAME=${1:?$(missing_param "service_destroy", "STACK_NAME")} - shift - - if [[ -z "$*" ]]; then - log error "$(missing_param "service_destroy", "[SERVICE_NAMES]")" - exit 1 - fi - - for service_name in "$@"; do - local service="${STACK_NAME}"_$service_name - log info "Waiting for service $service to be removed ... " - if [[ -n $(docker service ls -qf name=$service) ]]; then - if [[ $(docker service ls --format "{{.Mode}}" -f name=$service) != "global" ]]; then - try "docker service scale $service=0" catch "Failed to scale down ${service_name}" - fi - try "docker service rm $service" catch "Failed to remove service ${service_name}" - docker::await_service_destroy "$service_name" "$STACK_NAME" - fi - overwrite "Waiting for service $service_name to be removed ... Done" - done -} - -# Removes the stack and awaits for each service in the stack to be removed -# -# Arguments: -# - $1 : stack name to be removed -# -docker::stack_destroy() { - local -r STACK_NAME=${1:?$(missing_param "stack_destroy")} - log info "Waiting for stack $STACK_NAME to be removed ..." 
- try "docker stack rm \ - $STACK_NAME" \ - throw \ - "Failed to remove $STACK_NAME" - - local start_time=$(date +%s) - while [[ -n "$(docker stack ps $STACK_NAME 2>/dev/null)" ]] ; do - config::timeout_check "${start_time}" "${STACK_NAME} to be destroyed" - sleep 1 - done - - overwrite "Waiting for stack $STACK_NAME to be removed ... Done" - - log info "Pruning networks ... " - try "docker network prune -f" catch "Failed to prune networks" - overwrite "Pruning networks ... done" - - docker::prune_volumes -} - -# Loops through all current services and builds up a dictionary of volume names currently in use -# (this also considers downed services, as you don't want to prune volumes for downed services) -# It then loops through all volumes and removes any that do not have a service definition attached to it -# -docker::prune_volumes() { - # Create an associative array to act as the dictionary to hold service volume names - # Need to add instant, which the gocli uses but is not defined as a service - declare -A referenced_volumes=(['instant']=true) - - log info "Pruning volumes ... 
" - - for service in $(docker service ls -q); do - for volume in $(docker service inspect $service --format '{{range .Spec.TaskTemplate.ContainerSpec.Mounts}}{{println .Source}}{{end}}'); do - referenced_volumes[$volume]=true - done - done - - for volume in $(docker volume ls --format {{.Name}}); do - # Check to see if the key (which is the volume name) exists - if [[ -v referenced_volumes[$volume] ]]; then - continue - fi - - # Ignore volumes attached to a container but are not apart of a service definition - local start_time=$(date +%s) - local should_ignore=true - if [[ -n $(docker ps -a -q --filter volume=$volume) ]]; then - local timeDiff=$(($(date +%s) - $start_time)) - until [[ $timeDiff -ge 10 ]]; do - timeDiff=$(($(date +%s) - $start_time)) - if [[ -n $(docker ps -a -q --filter volume=$volume) ]]; then - sleep 1 - else - should_ignore=false - fi - done - if $should_ignore; then - continue - fi - fi - - log info "Waiting for volume $volume to be removed..." - start_time=$(date +%s) - until [[ -z "$(docker volume ls -q --filter name=^$volume$ 2>/dev/null)" ]]; do - docker volume rm $volume >/dev/null 2>&1 - config::timeout_check "${start_time}" "$volume to be removed" "60" "10" - sleep 1 - done - overwrite "Waiting for volume $volume to be removed... Done" - done - - overwrite "Pruning volumes ... done" -} - -# Prunes configs based on a label -# -# Arguments: -# - $@ : configs label list (eg. logstash) -# -docker::prune_configs() { - if [[ -z "$*" ]]; then - log error "$(missing_param "prune_configs", "[CONFIG_LABELS]")" - exit 1 - fi - - for config_name in "$@"; do - # shellcheck disable=SC2046 - if [[ -n $(docker config ls -qf label=name="$config_name") ]]; then - log info "Waiting for configs to be removed..." - - docker config rm $(docker config ls -qf label=name="$config_name") &>/dev/null - - overwrite "Waiting for configs to be removed... 
Done" - fi - done -} - -# Checks if the image exists, if not it will pull it from docker -# -# Arguments: -# - $@ : images list (eg. bitnami/kafka:3.3.1) -# -docker::check_images_existence() { - if [[ -z "$*" ]]; then - log error "$(missing_param "check_images_existence", "[IMAGES]")" - exit 1 - fi - - local timeout_pull_image - timeout_pull_image=300 - for image_name in "$@"; do - image_name=$(eval echo "$image_name") - if [[ -z $(docker image inspect "$image_name" --format "{{.Id}}" 2>/dev/null) ]]; then - log info "The image $image_name is not found, Pulling from docker..." - try \ - "timeout $timeout_pull_image docker pull $image_name 1>/dev/null" \ - throw \ - "An error occured while pulling the image $image_name" - - overwrite "The image $image_name is not found, Pulling from docker... Done" - fi - done -} - -# Deploys a service -# It will pull images if they don't exist in the local docker hub registry -# It will set config digests (in case a config is defined in the compose file) -# It will remove stale configs -# -# Arguments: -# - $1 : docker stack name to group the service under -# - $2 : docker compose path (eg. /instant/monitoring) -# - $3 : docker compose file (eg. docker-compose.yml or docker-compose.cluster.yml) -# - $@ : (optional) list of docker compose files (eg. 
docker-compose.cluster.yml docker-compose.dev.yml) -# - $@:4:n : (optional) a marker 'defer-sanity' used to defer deploy::sanity to the caller, can appear anywhere in the optional list -# -docker::deploy_service() { - local -r STACK_NAME="${1:?$(missing_param "deploy_service" "STACK_NAME")}" - local -r DOCKER_COMPOSE_PATH="${2:?$(missing_param "deploy_service" "DOCKER_COMPOSE_PATH")}" - local -r DOCKER_COMPOSE_FILE="${3:?$(missing_param "deploy_service" "DOCKER_COMPOSE_FILE")}" - local docker_compose_param="" - - # Check for the existance of the images - local -r images=($(yq '.services."*".image' "${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE")) - if [[ "${images[*]}" != "null" ]]; then - docker::check_images_existence "${images[@]}" - fi - - local defer_sanity=false - for optional_config in "${@:4}"; do - if [[ -n $optional_config ]]; then - if [[ $optional_config == "defer-sanity" ]]; then - defer_sanity=true - else - docker_compose_param="$docker_compose_param -c ${DOCKER_COMPOSE_PATH}/$optional_config" - fi - fi - done - - docker::prepare_config_digests "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /} - docker::ensure_external_networks_existence "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /} - - try "docker stack deploy \ - -c ${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE \ - $docker_compose_param \ - --with-registry-auth \ - ${STACK_NAME}" \ - throw \ - "Wrong configuration in ${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE or in the other supplied compose files" - - docker::cleanup_stale_configs "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /} - - if [[ $defer_sanity != true ]]; then - docker::deploy_sanity "$STACK_NAME" "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /} - fi -} - -# Deploys a config importer -# Sets the config digests, deploys the config importer, removes it and removes the stale configs -# -# Arguments: -# - $1 : stack name that the service falls 
under -# - $2 : docker compose path (eg. /instant/monitoring/importer/docker-compose.config.yml) -# - $3 : services name (eg. clickhouse-config-importer) -# - $4 : config label (eg. clickhouse kibana) -# -docker::deploy_config_importer() { - local -r STACK_NAME="${1:?$(missing_param "deploy_config_importer" "STACK_NAME")}" - local -r CONFIG_COMPOSE_PATH="${2:?$(missing_param "deploy_config_importer" "CONFIG_COMPOSE_PATH")}" - local -r SERVICE_NAME="${3:?$(missing_param "deploy_config_importer" "SERVICE_NAME")}" - local -r CONFIG_LABEL="${4:?$(missing_param "deploy_config_importer" "CONFIG_LABEL")}" - - log info "Waiting for config importer $SERVICE_NAME to start ..." - ( - if [[ ! -f "$CONFIG_COMPOSE_PATH" ]]; then - log error "No such file: $CONFIG_COMPOSE_PATH" - exit 1 - fi - - config::set_config_digests "$CONFIG_COMPOSE_PATH" - - try \ - "docker stack deploy -c ${CONFIG_COMPOSE_PATH} ${STACK_NAME}" \ - throw \ - "Wrong configuration in $CONFIG_COMPOSE_PATH" - - log info "Waiting to give core config importer time to run before cleaning up service" - - config::remove_config_importer "$STACK_NAME" "$SERVICE_NAME" - config::await_service_removed "$STACK_NAME" "$SERVICE_NAME" - - log info "Removing stale configs..." - config::remove_stale_service_configs "$CONFIG_COMPOSE_PATH" "$CONFIG_LABEL" - overwrite "Removing stale configs... Done" - ) || { - log error "Failed to deploy the config importer: $SERVICE_NAME" - exit 1 - } -} - -# Checks for errors when deploying -# -# Arguments: -# - $1 : stack name that the services falls under -# - $@ : fully qualified path to the compose file(s) with service definitions (eg. 
/instant/interoperability-layer-openhim/docker-compose.yml) -# -docker::deploy_sanity() { - local -r STACK_NAME="${1:?$(missing_param "deploy_sanity" "STACK_NAME")}" - # shift off the stack name to get the subset of services to check - shift - - if [[ -z "$*" ]]; then - log error "$(missing_param "deploy_sanity" "[COMPOSE_FILES]")" - exit 1 - fi - - local services=() - for compose_file in "$@"; do - # yq 'keys' returns:"- foo - bar" if you have yml with a foo: and bar: service definition - # which is why we remove the "- " before looping - # it will also return '#' as a key if you have a comment, so we clean them with ' ... comments="" ' first - local compose_services=$(yq '... comments="" | .services | keys' $compose_file) - compose_services=${compose_services//- /} - for service in ${compose_services[@]}; do - # only append unique service to services - if [[ ! ${services[*]} =~ $service ]]; then - services+=($service) - fi - done - done - - for service_name in ${services[@]}; do - docker::await_service_status $STACK_NAME "$service_name" "Running" - done -} - -# Scales services to the passed in replica count -# -# Arguments: -# - $1 : stack name that the services falls under -# - $2 : replicas number (eg. 0 (to scale down) or 1 (to scale up) or 2 (to scale up more)) -# -docker::scale_services() { - local -r STACK_NAME="${1:?$(missing_param "scale_services" "STACK_NAME")}" - local -r REPLICAS="${2:?$(missing_param "scale_services" "REPLICAS")}" - local services=($(docker stack services $STACK_NAME | awk '{print $2}' | tail -n +2)) - for service_name in "${services[@]}"; do - log info "Waiting for $service_name to scale to $REPLICAS ..." - try \ - "docker service scale $service_name=$REPLICAS" \ - catch \ - "Failed to scale $service_name to $REPLICAS" - overwrite "Waiting for $service_name to scale to $REPLICAS ... 
Done" - done -} - -# Checks if the external networks exist and tries to create them if they do not -# -# Arguments: -# - $@ : fully qualified path to the docker compose file(s) with the possible network definitions (eg. /instant/interoperability-layer-openhim/docker-compose.yml) -# -docker::ensure_external_networks_existence() { - if [[ -z "$*" ]]; then - log error "$(missing_param "ensure_external_networks_existence", "[COMPOSE_FILES]")" - exit 1 - fi - - for compose_file in "$@"; do - if [[ $(yq '.networks' $compose_file) == "null" ]]; then - continue - fi - - local network_keys=$(yq '... comments="" | .networks | keys' $compose_file) - local networks=(${network_keys//- /}) - if [[ "${networks[*]}" != "null" ]]; then - for network_name in "${networks[@]}"; do - # check if the property external is both present and set to true for the current network - # then pull the necessary properties to create the network - if [[ $(name=$network_name yq '.networks.[env(name)] | select(has("external")) | .external' $compose_file) == true ]]; then - local name=$(name=$network_name yq '.networks.[env(name)] | .name' $compose_file) - if [[ $name == "null" ]]; then - name=$network_name - fi - - # network with the name already exists so no need to create it - if docker network ls | awk '{print $2}' | grep -q -w "$name"; then - continue - fi - - local driver=$(name=$network_name yq '.networks.[env(name)] | .driver' $compose_file) - if [[ $driver == "null" ]]; then - driver="overlay" - fi - - local attachable="" - if [[ $(name=$network_name yq '.networks.[env(name)] | .attachable' $compose_file) == true ]]; then - attachable="--attachable" - fi - - log info "Waiting to create external network $name ..." - try \ - "docker network create --scope=swarm \ - -d $driver \ - $attachable \ - $name" \ - throw \ - "Failed to create network $name" - overwrite "Waiting to create external network $name ... 
Done" - fi - done - fi - done -} - -# Joins a service to a network by updating the service spec to include the network. -# -# Note: Do not remove if not used in the Platform as this is mainly used by -# custom packages that cannot overwrite the docker compose file to add the network connection required. -# -# Arguments: -# - $1 : service name that needs to join the network (eg. analytics-datastore-elastic-search) -# - $2 : network name to join (eg. elastic_public) -# -docker::join_network() { - local -r SERVICE_NAME="${1:?$(missing_param "join_network" "SERVICE_NAME")}" - local -r NETWORK_NAME="${2:?$(missing_param "join_network" "NETWORK_NAME")}" - local network_id - network_id=$(docker network ls --filter name="$NETWORK_NAME$" --format '{{.ID}}') - if [[ -n "${network_id}" ]]; then - if docker service inspect "$SERVICE_NAME" --format "{{.Spec.TaskTemplate.Networks}}" | grep -q "$network_id"; then - log info "Service $SERVICE_NAME is already connected to network $NETWORK_NAME." - else - log info "Waiting to join $SERVICE_NAME to external network $NETWORK_NAME ..." - try \ - "docker service update \ - --network-add name=$NETWORK_NAME \ - $SERVICE_NAME" \ - throw \ - "Failed to join network $NETWORK_NAME" - fi - else - log error "Network $NETWORK_NAME does not exist, cannot join $SERVICE_NAME to it ..." - fi -} - -# Checks the compose file(s) passed in for the existance of a config.file definition to pass to config::set_config_digests -# -# Arguments: -# - $@ : fully qualified path to the compose file(s) to check (eg. 
/instant/interoperability-layer-openhim/docker-compose.yml) -# -docker::prepare_config_digests() -{ - if [[ -z "$*" ]]; then - log error "$(missing_param "prepare_config_digests", "[COMPOSE_FILES]")" - exit 1 - fi - - for compose_file in "$@"; do - local files=($(yq '.configs."*.*".file' "$compose_file")) - if [[ "${files[*]}" != "null" ]]; then - config::set_config_digests "$compose_file" - fi - done -} - -# Checks the compose file(s) passed in for the existance of a config.lables.name definition to pass to config::remove_stale_service_configs -# To ensure that the service has the most up to date config digest -# -# Arguments: -# - $@ : fully qualified path to the compose file(s) to check (eg. /instant/interoperability-layer-openhim/docker-compose.yml) -# -docker::cleanup_stale_configs() -{ - if [[ -z "$*" ]]; then - log error "$(missing_param "cleanup_stale_configs", "[COMPOSE_FILES]")" - exit 1 - fi - - for compose_file in "$@"; do - local label_names=($(yq '.configs."*.*".labels.name' "$compose_file" | sort -u)) - if [[ "${label_names[*]}" != "null" ]]; then - for label_name in "${label_names[@]}"; do - config::remove_stale_service_configs "$compose_file" "${label_name}" - done - fi - done -} diff --git a/utils/log.sh b/utils/log.sh deleted file mode 100644 index 6a77bde9..00000000 --- a/utils/log.sh +++ /dev/null @@ -1,228 +0,0 @@ -#!/bin/bash - -set -uo pipefail - -# Global constants -PREV_LINE="\e[1A" # moves cursor to previous line -CLEAR_LINE="\e[K" # clears the current line the cursor is on -CLEAR_PREV_LINE="${PREV_LINE}${PREV_LINE}${CLEAR_LINE}" - -# Defaults -DEBUG="${DEBUG:-0}" -BASHLOG_FILE="${BASHLOG_FILE:-0}" - -root_log_file_path="/tmp/logs" -LOG_FILE_PATH="${root_log_file_path}/${BASHLOG_FILE_PATH:-platform.log}" - -function _log_exception() { - ( - BASHLOG_FILE=0 - BASHLOG_JSON=0 - BASHLOG_SYSLOG=0 - - log 'error' "Logging Exception: ${@}" - ) -} - -function log() { - local date_format="${BASHLOG_DATE_FORMAT:-+%F %T}" - local date="$(date 
"${date_format}")" - local date_s="$(date "+%s")" - - local file="${BASHLOG_FILE:-0}" - local file_path="${LOG_FILE_PATH:-/tmp/$(basename "${0}").log}" - - local json="${BASHLOG_JSON:-0}" - local json_path="${BASHLOG_JSON_PATH:-/tmp/$(basename "${0}").log.json}" - - local syslog="${BASHLOG_SYSLOG:-0}" - local tag="${BASHLOG_SYSLOG_TAG:-$(basename "${0}")}" - local facility="${BASHLOG_SYSLOG_FACILITY:-local0}" - local pid="${$}" - - local level="${1}" - local upper="$(echo "${level}" | awk '{print toupper($0)}')" - local debug_level="${DEBUG:-0}" - - shift 1 - - local line="${@}" - - # RFC 5424 - # - # Numerical Severity - # Code - # - # 0 Emergency: system is unusable - # 1 Alert: action must be taken immediately - # 2 Critical: critical conditions - # 3 Error: error conditions - # 4 Warning: warning conditions - # 5 Notice: normal but significant condition - # 6 Informational: informational messages - # 7 Debug: debug-level messages - - local -A severities - severities['DEBUG']=7 - severities['INFO']=6 - severities['NOTICE']=5 # Unused - severities['WARN']=4 - severities['ERROR']=3 - severities['CRIT']=2 # Unused - severities['ALERT']=1 # Unused - severities['EMERG']=0 # Unused - - local severity="${severities[${upper}]:-3}" - - if [ "${debug_level}" -gt 0 ] || [ "${severity}" -lt 7 ]; then - - if [ "${syslog}" -eq 1 ]; then - local syslog_line="${upper}: ${line}" - - logger \ - --id="${pid}" \ - -t "${tag}" \ - -p "${facility}.${severity}" \ - "${syslog_line}" || - _log_exception "logger --id=\"${pid}\" -t \"${tag}\" -p \"${facility}.${severity}\" \"${syslog_line}\"" - fi - - if [ "${file}" -eq 1 ]; then - clean_line="${line//\\e[1A/}" - clean_line="${clean_line//\\e[K/}" - local file_line="${date} [${upper}] ${clean_line}" - echo -e "${file_line}" >>"${file_path}" || - _log_exception "echo -e \"${file_line}\" >> \"${file_path}\"" - fi - - if [ "${json}" -eq 1 ]; then - local json_line="$(printf '{"timestamp":"%s","level":"%s","message":"%s"}' "${date_s}" 
"${level}" "${line}")" - echo -e "${json_line}" >>"${json_path}" || - _log_exception "echo -e \"${json_line}\" >> \"${json_path}\"" - fi - - fi - - local -A colours - colours['DEBUG']='\033[34m' # Blue - colours['INFO']='\033[32m' # Green - colours['NOTICE']='' # Unused - colours['WARN']='\033[33m' # Yellow - colours['ERROR']='\033[31m' # Red - colours['CRIT']='' # Unused - colours['ALERT']='' # Unused - colours['EMERG']='' # Unused - colours['DEFAULT']='\033[0m' # Default - - local -A emoticons - emoticons['DEBUG']='🔷' - emoticons['INFO']='❕' - emoticons['NOTICE']='💡' - emoticons['WARN']='🔶' - emoticons['ERROR']='❌' - emoticons['CRIT']='⛔' - emoticons['ALERT']='❗❗' - emoticons['EMERG']='🚨' - emoticons['DEFAULT']='' - - local norm="${colours['DEFAULT']}" - local colour="${colours[${upper}]:-\033[31m}" - - if [[ "${line}" == *"${CLEAR_PREV_LINE}"* ]]; then - # Append package name dynamically when override - line="${CLEAR_PREV_LINE}[$(dirname -- "$0" | sed -e 's/-/ /g' -e 's/\b\(.\)/\u\1/g')] ${line#*"$CLEAR_PREV_LINE"}" - else - line="[$(dirname -- "$0" | sed -e 's/-/ /g' -e 's/\b\(.\)/\u\1/g')] ${line}" - fi - - local std_line="${colour} ${emoticons[${upper}]} ${line}${norm}" - - # Standard Output (Pretty) - case "${level}" in - 'default' | 'info' | 'warn') - echo -e "${std_line}" - ;; - 'debug') - if [ "${debug_level}" -gt 0 ]; then - echo -e "${std_line}" - fi - ;; - 'error') - echo -e "${std_line}" >&2 - ;; - *) - log 'error' "Undefined log level trying to log: ${@}" - ;; - esac -} - -# This is an option if you want to log every single command executed, -# but it will significantly impact script performance and unit tests will fail -if [[ $DEBUG -eq 1 ]]; then - declare -g prev_cmd="null" - declare -g this_cmd="null" - - trap 'prev_cmd=$this_cmd; this_cmd=$BASH_COMMAND; log debug $this_cmd' DEBUG -fi - -# A function that will return a message called when of parameter not provided -# -# Arguments: -# - $1 : optional - function name missing the parameter -# - $2 : 
optional - name of the parameter missing -missing_param() { - local FUNC_NAME=${1:-""} - local ARG_NAME=${2:-""} - - echo "FATAL: ${FUNC_NAME} parameter ${ARG_NAME} not provided" -} - -# Overwrites the last echo'd command with what is provided -# -# Arguments: -# - $1 : message (eg. "Setting passwords... Done") -overwrite() { - local -r MESSAGE=${1:?$(missing_param "overwrite")} - if [ "${DEBUG}" -eq 1 ]; then - log info "${MESSAGE}" - else - log info "${CLEAR_PREV_LINE}${MESSAGE}" - fi -} - -# Execute a command handle logging of the output -# -# Arguments: -# - $1 : command (eg. "docker service rm elastic-search") -# - $2 : throw or catch (eg. "throw", "catch") -# - $3 : error message (eg. "Failed to remove elastic-search service") -try() { - local -r COMMAND=${1:?$(missing_param "try" "COMMAND")} - local -r SHOULD_THROW=${2:-"throw"} - local -r ERROR_MESSAGE=${3:?$(missing_param "try" "ERROR_MESSAGE")} - - if [ "${BASHLOG_FILE}" -eq 1 ]; then - if ! eval "$COMMAND" >>"$LOG_FILE_PATH" 2>&1; then - log error "$ERROR_MESSAGE" - if [[ "$SHOULD_THROW" == "throw" ]]; then - exit 1 - fi - fi - else - if [ "${DEBUG}" -eq 1 ]; then - if ! eval "$COMMAND"; then - log error "$ERROR_MESSAGE" - if [[ "$SHOULD_THROW" == "throw" ]]; then - exit 1 - fi - fi - else - if ! eval "$COMMAND" 1>/dev/null; then - log error "$ERROR_MESSAGE" - if [[ "$SHOULD_THROW" == "throw" ]]; then - exit 1 - fi - fi - fi - fi -}