diff --git a/.vscode/settings.json b/.vscode/settings.json
index 3f6e69e1..8d187f9c 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -6,7 +6,7 @@
     "editor.defaultFormatter": "esbenp.prettier-vscode"
   },
   "editor.codeActionsOnSave": {
-    "source.fixAll": true
+    "source.fixAll": "explicit"
   },
   "json.schemas": [
     {
diff --git a/Dockerfile b/Dockerfile
index 44d3cef5..e33f100c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM openhie/package-base:2.1.3
+FROM openhie/package-base:2.2.0
 
 # Install yq
 RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.23.1/yq_linux_amd64 -o /usr/bin/yq
diff --git a/utils/config-utils.sh b/utils/config-utils.sh
deleted file mode 100755
index 01a6156c..00000000
--- a/utils/config-utils.sh
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/bin/bash
-#
-# Library name: config
-# This is a library that contains functions to assist with docker configs
-#
-# For functions using `declare -n`, note the following explanation https://linuxhint.com/bash_declare_command/#:~:text=giving%20them%20attributes.-,Namerefs,-If%20you%20are
-
-. "$(pwd)/utils/log.sh"
-
-# Sets the digest variables for the conf raft files in the provided docker compose file
-#
-# Requirements:
-# - All configs must have a file and name property
-# - The name property must end in -${DIGEST_VAR_NAME:?err} (eg. name: my-file-${MY_FILE_DIGEST:?err})
-#
-# Arguments:
-# - $1 : docker compose directory path (eg. /home/user/project/docker-compose.yml)
-#
-# Exports:
-# As many digest environment variables as are declared in the provided docker compose file
-#
-config::set_config_digests() {
-    local -r DOCKER_COMPOSE_PATH="${1:?$(missing_param "set_config_digests")}"
-
-    # Get configs files and names from yml file
-    local -r files=($(yq '.configs."*.*".file' "${DOCKER_COMPOSE_PATH}"))
-    local -r names=($(yq '.configs."*.*".name' "${DOCKER_COMPOSE_PATH}"))
-    local -r compose_folder_path="${DOCKER_COMPOSE_PATH%/*}"
-
-    if [[ "${files[*]}" != *"null"* ]] && [[ "${names[*]}" != *"null"* ]]; then
-        log info "Setting config digests"
-
-        for ((i = 0; i < ${#files[@]}; i++)); do
-            file=${files[$i]}
-            name=${names[$i]}
-
-            file_name="${compose_folder_path}${file//\.\///}" # TODO: Throw an error if the file name is too long to allow for a unique enough digest
-            env_var_name=$(echo "${name}" | grep -P -o "{.*:?err}" | sed 's/[{}]//g' | sed 's/:?err//g')
-
-            if [[ -n "$env_var_name" ]]; then
-                # generate and truncate the digest to conform to the 64 character restriction on docker config names
-                env_declaration_characters=":?err" # '${:?err}' from setting an env variable
-                remainder=$((64 - (${#name} - ${#env_var_name} - ${#env_declaration_characters})))
-                export "${env_var_name}"="$(cksum "${file_name}" | awk '{print $1}' | cut -c -${remainder})"
-            fi
-        done
-    elif [[ "${files[*]}" == *"null"* ]]; then
-        log error "No files found to set the digest in:\n $DOCKER_COMPOSE_PATH"
-        exit 1
-    else
-        log error "You should specify names for the files in:\n $DOCKER_COMPOSE_PATH"
-        exit 1
-    fi
-}
-
-# Removes stale docker configs based on the provided docker-compose file
-#
-# Requirements:
-# - All configs must have a file and name property
-# - The name property must end in -${DIGEST_VAR_NAME:?err} (eg. name: my-file-${MY_FILE_DIGEST:?err})
-#
-# Arguments:
-# - $1 : docker compose directory path (eg. /home/user/project/docker-compose.yml)
-# - $2 : config label (eg. logstash)
-config::remove_stale_service_configs() {
-    local -r DOCKER_COMPOSE_PATH="${1:?$(missing_param "remove_stale_service_configs" "DOCKER_COMPOSE_PATH")}"
-    local -r CONFIG_LABEL="${2:?$(missing_param "remove_stale_service_configs" "CONFIG_LABEL")}"
-
-    local -r compose_names=($(yq '.configs."*.*".name' "${DOCKER_COMPOSE_PATH}"))
-    local configs_to_remove=()
-
-    if [[ "${compose_names[*]}" != "null" ]]; then
-        for compose_name in "${compose_names[@]}"; do
-            compose_name_without_env=$(echo "${compose_name}" | sed 's/-\${.*//g')
-
-            compose_name_occurences=$(for word in "${compose_names[@]}"; do echo "${word}"; done | grep -c "${compose_name_without_env}")
-            if [[ $compose_name_occurences -gt "1" ]]; then
-                log warn "Warning: Duplicate config name (${compose_name_without_env}) was found in ${DOCKER_COMPOSE_PATH}"
-            fi
-
-            raft_ids=($(docker config ls -f "label=name=${CONFIG_LABEL}" -f "name=${compose_name_without_env}" --format "{{.ID}}"))
-            # Only keep the most recent of all configs with the same name
-            if [[ ${#raft_ids[@]} -gt 1 ]]; then
-                most_recent_raft_id="${raft_ids[0]}"
-                for ((i = 1; i < ${#raft_ids[@]}; i++)); do
-                    raft_id=${raft_ids[$i]}
-                    most_recent_raft_created_date=$(docker config inspect -f "{{.CreatedAt}}" "${most_recent_raft_id}")
-                    raft_created_date=$(docker config inspect -f "{{.CreatedAt}}" "${raft_id}")
-                    if [[ $raft_created_date > $most_recent_raft_created_date ]]; then
-                        configs_to_remove+=("${most_recent_raft_id}")
-                        most_recent_raft_id="${raft_id}"
-                    else
-                        configs_to_remove+=("${raft_id}")
-                    fi
-                done
-            fi
-        done
-    else
-        log warn "No name files found in the compose config to be removed"
-    fi
-
-    if [[ "${#configs_to_remove[@]}" -gt 0 ]]; then
-        try \
-            "docker config rm ${configs_to_remove[*]}" \
-            catch \
-            "Failed to remove configs: ${configs_to_remove[*]}"
-    fi
-}
-
-# A function that exists in a loop to see how long that loop has run for, providing a warning
-# at the time specified in argument $3, and exits with code 124 after the time specified in argument $4.
-#
-# Arguments:
-# - $1 : start time of the timeout check
-# - $2 : a message containing reference to the loop that timed out
-# - $3 : timeout time in seconds, default is 300 seconds
-# - $4 : elapsed time to issue running-for-longer-than-expected warning (in seconds), default is 60 seconds
-#
-config::timeout_check() {
-    local start_time=$(($1))
-    local message=$2
-    local exit_time="${3:-300}"
-    local warning_time="${4:-60}"
-
-    local timeDiff=$(($(date +%s) - $start_time))
-    if [[ $timeDiff -ge $warning_time ]] && [[ $timeDiff -lt $(($warning_time + 1)) ]]; then
-        log warn "Warning: Waited $warning_time seconds for $message. This is taking longer than it should..."
-    elif [[ $timeDiff -ge $exit_time ]]; then
-        log error "Fatal: Waited $exit_time seconds for $message. Exiting..."
-        exit 124
-    fi
-}
-
-# A generic function confirming whether or not a containerized api is reachable
-#
-# Requirements:
-# - The function attempts to start up a helper container using the jembi/await-helper image. It is therefore necessary
-#   to specify the docker-compose file to deploy the await-helper container which the await_service_running function
-#   relies on. Details on configuring the await-helper can be found at https://github.com/jembi/platform-await-helper.
-#
-# Arguments:
-# - $1 : the service being awaited
-# - $2 : path to await-helper compose.yml file (eg. ~/projects/platform/dashboard-visualiser-jsreport/docker-compose.await-helper.yml)
-# - $3 : desired number of instances of the awaited-service
-# - $4 : stack name that the service falls under (eg. openhim)
-# - $5 : (optional) the max time allowed to wait for a service's response, defaults to 300 seconds
-# - $6 : (optional) elapsed time to throw a warning, defaults to 60 seconds
-#
-config::await_service_running() {
-    local -r SERVICE_NAME="${1:?$(missing_param "await_service_running" "SERVICE_NAME")}"
-    local -r AWAIT_HELPER_FILE_PATH="${2:?$(missing_param "await_service_running" "AWAIT_HELPER_FILE_PATH")}"
-    local -r SERVICE_INSTANCES="${3:?$(missing_param "await_service_running" "SERVICE_INSTANCES")}"
-    local -r STACK_NAME="${4:?$(missing_param "await_service_running" "STACK_NAME")}"
-    local -r exit_time="${5:-}"
-    local -r warning_time="${6:-}"
-    local start_time
-    start_time=$(date +%s)
-
-    docker service rm "$STACK_NAME"_await-helper &>/dev/null
-
-    try "docker stack deploy -c $AWAIT_HELPER_FILE_PATH $STACK_NAME" throw "Failed to deploy await helper"
-    until [[ $(docker service ls -f name="$STACK_NAME"_"$SERVICE_NAME" --format "{{.Replicas}}") == *"$SERVICE_INSTANCES/$SERVICE_INSTANCES"* ]]; do
-        config::timeout_check "$start_time" "$SERVICE_NAME to start" "$exit_time" "$warning_time"
-        sleep 1
-    done
-
-    start_time=$(date +%s) # Reintialize for the second loop
-    local await_helper_state
-    await_helper_state=$(docker service ps "$STACK_NAME"_await-helper --format "{{.CurrentState}}")
-    until [[ $await_helper_state == *"Complete"* ]]; do
-        config::timeout_check "$start_time" "$SERVICE_NAME status check" "$exit_time" "$warning_time"
-        sleep 1
-
-        await_helper_state=$(docker service ps "$STACK_NAME"_await-helper --format "{{.CurrentState}}")
-        if [[ $await_helper_state == *"Failed"* ]] || [[ $await_helper_state == *"Rejected"* ]]; then
-            log error "Fatal: Received error when trying to verify state of $SERVICE_NAME. Error:
-            $(docker service ps "$STACK_NAME"_await-helper --no-trunc --format '{{.Error}}')"
-            exit 1
-        fi
-    done
-
-    try "docker service rm "$STACK_NAME"_await-helper" catch "Failed to remove await-helper"
-}
-
-# A function which removes a config importing service on successful completion, and exits with an error otherwise
-#
-# Arguments:
-# - $1 : stack name that the service falls under (eg. openhim)
-# - $2 : the name of the config importer
-# - $3 : (optional) the timeout time for the config importer to run, defaults to 300 seconds
-# - $4 : (optional) elapsed time to throw a warning, defaults to 60 seconds
-#
-config::remove_config_importer() {
-    local -r STACK_NAME="${1:?$(missing_param "remove_config_importer" "STACK_NAME")}"
-    local -r CONFIG_IMPORTER_SERVICE_NAME="${2:?$(missing_param "remove_config_importer" "CONFIG_IMPORTER_SERVICE_NAME")}"
-    local -r exit_time="${3:-}"
-    local -r warning_time="${4:-}"
-    local -r start_time=$(date +%s)
-
-    local config_importer_state
-
-    if [[ -z $(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME") ]]; then
-        log info "${STACK_NAME}_$CONFIG_IMPORTER_SERVICE_NAME service cannot be removed as it does not exist!"
-        exit 0
-    fi
-
-    config_importer_state=$(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME" --format "{{.CurrentState}}")
-    until [[ $config_importer_state == *"Complete"* ]]; do
-        config::timeout_check "$start_time" "$CONFIG_IMPORTER_SERVICE_NAME to run" "$exit_time" "$warning_time"
-        sleep 1
-
-        config_importer_state=$(docker service ps "$STACK_NAME"_"$CONFIG_IMPORTER_SERVICE_NAME" --format "{{.CurrentState}}")
-        if [[ $config_importer_state == *"Failed"* ]] || [[ $config_importer_state == *"Rejected"* ]]; then
-            log error "Fatal: $CONFIG_IMPORTER_SERVICE_NAME failed with error:
-            $(docker service ps ${STACK_NAME}_"$CONFIG_IMPORTER_SERVICE_NAME" --no-trunc --format '{{.Error}}')"
-            exit 1
-        fi
-    done
-
-    try "docker service rm "$STACK_NAME"_$CONFIG_IMPORTER_SERVICE_NAME" catch "Failed to remove config importer"
-}
-
-# Waits for the provided service to be removed
-#
-# Arguments:
-# - $1 : stack name that the service falls under (eg. openhim)
-# - $2 : service name (eg. analytics-datastore-elastic-search)
-#
-config::await_service_removed() {
-    local -r STACK_NAME="${1:?$(missing_param "await_service_removed", "STACK_NAME")}"
-    local -r SERVICE_NAME="${2:?$(missing_param "await_service_removed", "SERVICE_NAME")}"
-    local start_time=$(date +%s)
-
-    until [[ -z $(docker stack ps $STACK_NAME -qf name="${STACK_NAME}_${SERVICE_NAME}" 2>/dev/null) ]]; do
-        config::timeout_check "$start_time" "${SERVICE_NAME} to be removed"
-        sleep 1
-    done
-    log info "Service $SERVICE_NAME successfully removed"
-}
-
-# Generates configs for a service from a folder and adds them to a temp docker-compose file
-#
-# Arguments:
-# - $1 : service name (eg. data-mapper-logstash)
-# - $2 : target base (eg. /usr/share/logstash/)
-# - $3 : target folder path in absolute format (eg. "$PATH_TO_FILE"/pipeline)
-# - $4 : compose file path (eg. "$PATH_TO_FILE")
-#
-# Exports:
-# All exports are required for yq to process the values and are not intended for external use
-# - service_config_query
-# - config_target
-# - config_source
-# - config_query
-# - config_file
-# - config_label_name
-# - config_service_name
-#
-config::generate_service_configs() {
-    local -r SERVICE_NAME=${1:?$(missing_param "generate_service_configs" "SERVICE_NAME")}
-    local -r TARGET_BASE=${2:?$(missing_param "generate_service_configs" "TARGET_BASE")}
-    local -r TARGET_FOLDER_PATH=${3:?$(missing_param "generate_service_configs" "TARGET_FOLDER_PATH")}
-    local -r COMPOSE_PATH=${4:?$(missing_param "generate_service_configs" "COMPOSE_PATH")}
-    local -r LABEL_NAME=${5:?$(missing_param "generate_service_configs" "LABEL_NAME")}
-    local -r TARGET_FOLDER_NAME=$(basename "${TARGET_FOLDER_PATH}")
-    local count=0
-
-    try \
-        "touch ${COMPOSE_PATH}/docker-compose.tmp.yml" \
-        throw \
-        "Failed to create temp service config compose file"
-
-    find "${TARGET_FOLDER_PATH}" -maxdepth 10 -mindepth 1 -type f | while read -r file; do
-        file_name=${file/"${TARGET_FOLDER_PATH%/}"/}
-        file_name=${file_name:1}
-        file_hash=$(cksum "${file}" | awk '{print $1}')
-
-        # for these variables to be visible by yq they need to be exported
-        export service_config_query=".services.${SERVICE_NAME}.configs[${count}]"
-        export config_target="${TARGET_BASE%/}/${TARGET_FOLDER_NAME}/${file_name}"
-        export config_source="${SERVICE_NAME}-${file_hash}"
-
-        export config_query=".configs.${config_source}"
-        export config_file="./${TARGET_FOLDER_NAME}/${file_name}"
-        export config_label_name=$LABEL_NAME
-        export config_service_name=$SERVICE_NAME
-
-        yq -i '
-        .version = "3.9" |
-        eval(strenv(service_config_query)).target = env(config_target) |
-        eval(strenv(service_config_query)).source = strenv(config_source) |
-        eval(strenv(config_query)).file = strenv(config_file) |
-        eval(strenv(config_query)).name = strenv(config_source) |
-        eval(strenv(config_query)).labels.name = strenv(config_label_name) |
-        eval(strenv(config_query)).labels.service = strenv(config_service_name)
-        ' "${COMPOSE_PATH}/docker-compose.tmp.yml"
-
-        count=$((count + 1))
-    done
-}
-
-# Replaces all environment variables in a file with the environment variable value
-#
-# Arguments:
-# - $1 : the path to the file that you wish to substitute env vars into (eg. "${COMPOSE_FILE_PATH}"/config.ini)
-#
-config::substitute_env_vars() {
-    local -r FILE_PATH="${1:?$(missing_param "substitute_env_vars")}"
-    config_with_env=$(envsubst <"${FILE_PATH}")
-    echo "" >"${FILE_PATH}"
-    echo "$config_with_env" >>"${FILE_PATH}"
-}
-
-# Modify a variable to contain the necessary `--config-rm` and `--config-add` arguments to update a service's
-# configs based off newly created docker configs for a provided folder. The modified variable must then be
-# used in a `docker service update` command, like follows:
-# ```
-# service_update_args=""
-# config::update_service_configs service_update_args /usr/share/logstash/ "$PATH_TO_FILE"/pipeline cares
-# docker service update $service_update_args instant_data-mapper-logstash
-# ```
-# Reference arguments:
-# - $1 : config update variable name (eg. service_update_args)
-#
-# Arguments:
-# - $2 : target base (eg. /usr/share/logstash/)
-# - $3 : target folder path in absolute format (eg. "$PATH_TO_FILE"/pipeline)
-# - $4 : config label name (eg. cares)
-config::update_service_configs() {
-    declare -n REF_config_update_var="${1:?$(missing_param "update_service_configs" "REF_config_update_var")}"
-    local -r TARGET_BASE=${2:?$(missing_param "update_service_configs" "TARGET_BASE")}
-    local -r TARGET_FOLDER_PATH=${3:?$(missing_param "update_service_configs" "TARGET_FOLDER_PATH")}
-    local -r CONFIG_LABEL_NAME="${4:?$(missing_param "update_service_configs" "CONFIG_LABEL_NAME")}"
-    local config_rm_string=""
-    local config_add_string=""
-
-    files=$(find "${TARGET_FOLDER_PATH}" -maxdepth 10 -mindepth 1 -type f)
-
-    for file in $files; do
-        file_name=${file/"${TARGET_FOLDER_PATH%/}"/}
-        file_name=${file_name:1}
-        file_hash=$(md5sum "${file}" | awk '{print $1}')
-        config_file="${TARGET_FOLDER_PATH}/${file_name}"
-        config_target="${TARGET_BASE%/}/${file_name}"
-        config_name=$(basename "$file_name")-$file_hash
-        old_config_name=$(docker config inspect --format="{{.Spec.Name}}" "$(docker config ls -qf name="$(basename "$file_name")")" 2>/dev/null)
-
-        if [[ "$config_name" != "$old_config_name" ]]; then
-            if [[ -n $old_config_name ]]; then
-                config_rm_string+="--config-rm $old_config_name "
-            fi
-            config_add_string+="--config-add source=$config_name,target=$config_target "
-
-            try \
-                "docker config create --label name=$CONFIG_LABEL_NAME $config_name $config_file" \
-                catch \
-                "Failed to create config"
-        fi
-    done
-
-    REF_config_update_var+="$config_rm_string $config_add_string"
-}
-
-# Modify a variable to contain the necessary `--env-add` arguments to update a service's
-# environment specified in a .env file. The modified variable must then be
-# used in a `docker service update` command, like follows:
-# ```
-# service_update_args=""
-# config::env_var_add_from_file service_update_args "$PATH_TO_FILE"/.env.add
-# docker service update $service_update_args instant_data-mapper-logstash
-# ```
-# Reference arguments:
-# - $1 : service update variable name (eg. service_update_args)
-#
-# Arguments:
-# - $2 : .env file (eg. "$PATH_TO_FILE"/.env.add)
-#
-config::env_var_add_from_file() {
-    declare -n REF_service_update_var="${1:?$(missing_param "env_var_add_from_file" "REF_service_update_var")}"
-    local -r ENV_FILE=${2:?$(missing_param "env_var_add_from_file" "ENV_FILE")}
-
-    if [[ ! -f $ENV_FILE ]]; then
-        log error "$ENV_FILE: No such file or directory. Exiting..."
-        return 1
-    fi
-
-    readarray -t env_vars <"$ENV_FILE"
-    for env_var in "${env_vars[@]}"; do
-        REF_service_update_var+=" --env-add $env_var"
-    done
-}
-
-# Modify a variable to contain the necessary `--env-add` arguments to update a service's
-# environment based on the provided env var. The modified variable must then be
-# used in a `docker service update` command, like follows:
-# ```
-# service_update_args=""
-# config::env_var_add service_update_args MY_ENV_VAR=my_value
-# docker service update $service_update_args instant_data-mapper-logstash
-# ```
-# Reference arguments:
-# - $1 : service update variable name (eg. service_update_args)
-#
-# Arguments:
-# - $2 : env var (eg. MY_ENV_VAR=my_value)
-#
-config::env_var_add() {
-    declare -n REF_service_update_var="${1:?$(missing_param "env_var_add" "REF_service_update_var")}"
-    local -r ENV_VAR=${2:?$(missing_param "env_var_add" "ENV_VAR")}
-
-    REF_service_update_var+=" --env-add $ENV_VAR"
-}
-
-# Waits for the provided service to be reachable by checking logs
-#
-# Arguments:
-# $1 : service name (eg. analytics-datastore-elastic-search)
-# $2 : stack name that the service falls under (eg. openhim)
-# $3 : log string to be checked (eg. Starting)
-#
-config::await_service_reachable() {
-    local -r SERVICE_NAME=${1:?$(missing_param "await_service_reachable" "SERVICE_NAME")}
-    local -r STACK_NAME=${2:?$(missing_param "await_service_reachable" "STACK_NAME")}
-    local -r LOG_MESSAGE=${3:?$(missing_param "await_service_reachable" "LOG_MESSAGE")}
-    local -r start_time=$(date +%s)
-
-    until [[ $(docker service logs --tail all "${STACK_NAME}"_"${SERVICE_NAME}" 2>/dev/null | grep -c "${LOG_MESSAGE}") -gt 0 ]]; do
-        config::timeout_check "$start_time" "${STACK_NAME}_$SERVICE_NAME to be reachable"
-        sleep 1
-    done
-}
diff --git a/utils/docker-utils.sh b/utils/docker-utils.sh
deleted file mode 100644
index 6c960bbf..00000000
--- a/utils/docker-utils.sh
+++ /dev/null
@@ -1,574 +0,0 @@
-#!/bin/bash
-#
-# Library name: docker
-# This is a library that contains functions to assist with docker actions
-
-. "$(pwd)/utils/config-utils.sh"
-. "$(pwd)/utils/log.sh"
-
-# Gets current status of the provided service
-#
-# Arguments:
-# - $1 : service name (eg. analytics-datastore-elastic-search)
-#
-docker::get_current_service_status() {
-    local -r SERVICE_NAME=${1:?$(missing_param "get_current_service_status")}
-    docker service ps "${SERVICE_NAME}" --format "{{.CurrentState}}" 2>/dev/null
-}
-
-# Gets unique errors from the provided service
-#
-# Arguments:
-# - $1 : service name (eg. analytics-datastore-elastic-search)
-#
-docker::get_service_unique_errors() {
-    local -r SERVICE_NAME=${1:?$(missing_param "get_service_unique_errors")}
-
-    # Get unique error messages using sort -u
-    docker service ps "${SERVICE_NAME}" --no-trunc --format '{{ .Error }}' 2>&1 | sort -u
-}
-
-# Waits for a container to be up
-#
-# Arguments:
-# - $1 : stack name that the service falls under (eg. elastic)
-# - $2 : service name (eg. analytics-datastore-elastic-search)
-#
-docker::await_container_startup() {
-    local -r STACK_NAME=${1:?$(missing_param "await_container_startup", "STACK_NAME")}
-    local -r SERVICE_NAME=${2:?$(missing_param "await_container_startup", "SERVICE_NAME")}
-
-    log info "Waiting for ${SERVICE_NAME} to start up..."
-    local start_time
-    start_time=$(date +%s)
-    until [[ -n $(docker service ls -qf name="${STACK_NAME}"_"${SERVICE_NAME}") ]]; do
-        config::timeout_check "${start_time}" "${SERVICE_NAME} to start"
-        sleep 1
-    done
-    overwrite "Waiting for ${SERVICE_NAME} to start up... Done"
-}
-
-# Waits for a container to be up
-#
-# Arguments:
-# - $1 : stack name that the service falls under (eg. elastic)
-# - $2 : service name (eg. analytics-datastore-elastic-search)
-# - $3 : service status (eg. running)
-#
-docker::await_service_status() {
-    local -r STACK_NAME=${1:?$(missing_param "await_service_status" "STACK_NAME")}
-    local -r SERVICE_NAME=${2:?$(missing_param "await_service_status" "SERVICE_NAME")}
-    local -r SERVICE_STATUS=${3:?$(missing_param "await_service_status" "SERVICE_STATUS")}
-    local -r start_time=$(date +%s)
-    local error_message=()
-
-    log info "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be ${SERVICE_STATUS}..."
-    until [[ $(docker::get_current_service_status ${STACK_NAME}_${SERVICE_NAME}) == *"${SERVICE_STATUS}"* ]]; do
-        config::timeout_check "${start_time}" "${STACK_NAME}_${SERVICE_NAME} to start"
-        sleep 1
-
-        # Get unique error messages using sort -u
-        new_error_message=($(docker::get_service_unique_errors ${STACK_NAME}_$SERVICE_NAME))
-        if [[ -n ${new_error_message[*]} ]]; then
-            # To prevent logging the same error
-            if [[ "${error_message[*]}" != "${new_error_message[*]}" ]]; then
-                error_message=(${new_error_message[*]})
-                log error "Deploy error in service ${STACK_NAME}_$SERVICE_NAME: ${error_message[*]}"
-            fi
-
-            # To exit in case the error is not having the image
-            if [[ "${new_error_message[*]}" == *"No such image"* ]]; then
-                log error "Do you have access to pull the image?"
-                exit 124
-            fi
-        fi
-    done
-    overwrite "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be ${SERVICE_STATUS}... Done"
-}
-
-# Waits for a container to be destroyed
-#
-# Arguments:
-# - $1 : stack name that the service container falls under (eg. elastic)
-# - $2 : service name (eg. analytics-datastore-elastic-search)
-#
-docker::await_container_destroy() {
-    local -r STACK_NAME=${1:?$(missing_param "await_container_destroy", "STACK_NAME")}
-    local -r SERVICE_NAME=${2:?$(missing_param "await_container_destroy", "SERVICE_NAME")}
-
-    log info "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be destroyed..."
-    local start_time
-    start_time=$(date +%s)
-    until [[ -z $(docker ps -qlf name="${STACK_NAME}_${SERVICE_NAME}") ]]; do
-        config::timeout_check "${start_time}" "${SERVICE_NAME} to be destroyed"
-        sleep 1
-    done
-    overwrite "Waiting for ${STACK_NAME}_${SERVICE_NAME} to be destroyed... Done"
-}
-
-# Waits for a service to be destroyed
-#
-# Arguments:
-# - $1 : service name (eg. analytics-datastore-elastic-search)
-# - $2 : stack name that the service falls under (eg. elastic)
-#
-docker::await_service_destroy() {
-    local -r SERVICE_NAME=${1:?$(missing_param "await_service_destroy", "SERVICE_NAME")}
-    local -r STACK_NAME=${2:?$(missing_param "await_service_destroy", "STACK_NAME")}
-    local start_time
-    start_time=$(date +%s)
-
-    while docker service ls | grep -q "\s${STACK_NAME}_${SERVICE_NAME}\s"; do
-        config::timeout_check "${start_time}" "${SERVICE_NAME} to be destroyed"
-        sleep 1
-    done
-}
-
-# Removes services containers then the service itself
-# This was created to aid in removing volumes,
-# since volumes being removed were still attached to some lingering containers after container remove
-#
-# NB: Global services can't be scale down
-#
-# Arguments:
-# - $1 : stack name that the services fall under (eg. elasticsearch)
-# - $@ : service names list (eg. analytics-datastore-elastic-search)
-#
-docker::service_destroy() {
-    local -r STACK_NAME=${1:?$(missing_param "service_destroy", "STACK_NAME")}
-    shift
-
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "service_destroy", "[SERVICE_NAMES]")"
-        exit 1
-    fi
-
-    for service_name in "$@"; do
-        local service="${STACK_NAME}"_$service_name
-        log info "Waiting for service $service to be removed ... "
-        if [[ -n $(docker service ls -qf name=$service) ]]; then
-            if [[ $(docker service ls --format "{{.Mode}}" -f name=$service) != "global" ]]; then
-                try "docker service scale $service=0" catch "Failed to scale down ${service_name}"
-            fi
-            try "docker service rm $service" catch "Failed to remove service ${service_name}"
-            docker::await_service_destroy "$service_name" "$STACK_NAME"
-        fi
-        overwrite "Waiting for service $service_name to be removed ... Done"
-    done
-}
-
-# Removes the stack and awaits for each service in the stack to be removed
-#
-# Arguments:
-# - $1 : stack name to be removed
-#
-docker::stack_destroy() {
-    local -r STACK_NAME=${1:?$(missing_param "stack_destroy")}
-    log info "Waiting for stack $STACK_NAME to be removed ..."
-    try "docker stack rm \
-        $STACK_NAME" \
-        throw \
-        "Failed to remove $STACK_NAME"
-
-    local start_time=$(date +%s)
-    while [[ -n "$(docker stack ps $STACK_NAME 2>/dev/null)" ]] ; do
-        config::timeout_check "${start_time}" "${STACK_NAME} to be destroyed"
-        sleep 1
-    done
-
-    overwrite "Waiting for stack $STACK_NAME to be removed ... Done"
-
-    log info "Pruning networks ... "
-    try "docker network prune -f" catch "Failed to prune networks"
-    overwrite "Pruning networks ... done"
-
-    docker::prune_volumes
-}
-
-# Loops through all current services and builds up a dictionary of volume names currently in use
-# (this also considers downed services, as you don't want to prune volumes for downed services)
-# It then loops through all volumes and removes any that do not have a service definition attached to it
-#
-docker::prune_volumes() {
-    # Create an associative array to act as the dictionary to hold service volume names
-    # Need to add instant, which the gocli uses but is not defined as a service
-    declare -A referenced_volumes=(['instant']=true)
-
-    log info "Pruning volumes ... "
-
-    for service in $(docker service ls -q); do
-        for volume in $(docker service inspect $service --format '{{range .Spec.TaskTemplate.ContainerSpec.Mounts}}{{println .Source}}{{end}}'); do
-            referenced_volumes[$volume]=true
-        done
-    done
-
-    for volume in $(docker volume ls --format {{.Name}}); do
-        # Check to see if the key (which is the volume name) exists
-        if [[ -v referenced_volumes[$volume] ]]; then
-            continue
-        fi
-
-        # Ignore volumes attached to a container but are not apart of a service definition
-        local start_time=$(date +%s)
-        local should_ignore=true
-        if [[ -n $(docker ps -a -q --filter volume=$volume) ]]; then
-            local timeDiff=$(($(date +%s) - $start_time))
-            until [[ $timeDiff -ge 10 ]]; do
-                timeDiff=$(($(date +%s) - $start_time))
-                if [[ -n $(docker ps -a -q --filter volume=$volume) ]]; then
-                    sleep 1
-                else
-                    should_ignore=false
-                fi
-            done
-            if $should_ignore; then
-                continue
-            fi
-        fi
-
-        log info "Waiting for volume $volume to be removed..."
-        start_time=$(date +%s)
-        until [[ -z "$(docker volume ls -q --filter name=^$volume$ 2>/dev/null)" ]]; do
-            docker volume rm $volume >/dev/null 2>&1
-            config::timeout_check "${start_time}" "$volume to be removed" "60" "10"
-            sleep 1
-        done
-        overwrite "Waiting for volume $volume to be removed... Done"
-    done
-
-    overwrite "Pruning volumes ... done"
-}
-
-# Prunes configs based on a label
-#
-# Arguments:
-# - $@ : configs label list (eg. logstash)
-#
-docker::prune_configs() {
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "prune_configs", "[CONFIG_LABELS]")"
-        exit 1
-    fi
-
-    for config_name in "$@"; do
-        # shellcheck disable=SC2046
-        if [[ -n $(docker config ls -qf label=name="$config_name") ]]; then
-            log info "Waiting for configs to be removed..."
-
-            docker config rm $(docker config ls -qf label=name="$config_name") &>/dev/null
-
-            overwrite "Waiting for configs to be removed... Done"
-        fi
-    done
-}
-
-# Checks if the image exists, if not it will pull it from docker
-#
-# Arguments:
-# - $@ : images list (eg. bitnami/kafka:3.3.1)
-docker::check_images_existence() {
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "check_images_existence", "[IMAGES]")"
-        exit 1
-    fi
-
-    local timeout_pull_image
-    timeout_pull_image=300
-    for image_name in "$@"; do
-        image_name=$(eval echo "$image_name")
-        if [[ -z $(docker image inspect "$image_name" --format "{{.Id}}" 2>/dev/null) ]]; then
-            log info "The image $image_name is not found, Pulling from docker..."
-            try \
-                "timeout $timeout_pull_image docker pull $image_name 1>/dev/null" \
-                throw \
-                "An error occured while pulling the image $image_name"
-
-            overwrite "The image $image_name is not found, Pulling from docker... Done"
-        fi
-    done
-}
-
-# Deploys a service
-# It will pull images if they don't exist in the local docker hub registry
-# It will set config digests (in case a config is defined in the compose file)
-# It will remove stale configs
-#
-# Arguments:
-# - $1 : docker stack name to group the service under
-# - $2 : docker compose path (eg. /instant/monitoring)
-# - $3 : docker compose file (eg. docker-compose.yml or docker-compose.cluster.yml)
-# - $@ : (optional) list of docker compose files (eg. docker-compose.cluster.yml docker-compose.dev.yml)
-# - $@:4:n : (optional) a marker 'defer-sanity' used to defer deploy::sanity to the caller, can appear anywhere in the optional list
-#
-docker::deploy_service() {
-    local -r STACK_NAME="${1:?$(missing_param "deploy_service" "STACK_NAME")}"
-    local -r DOCKER_COMPOSE_PATH="${2:?$(missing_param "deploy_service" "DOCKER_COMPOSE_PATH")}"
-    local -r DOCKER_COMPOSE_FILE="${3:?$(missing_param "deploy_service" "DOCKER_COMPOSE_FILE")}"
-    local docker_compose_param=""
-
-    # Check for the existance of the images
-    local -r images=($(yq '.services."*".image' "${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE"))
-    if [[ "${images[*]}" != "null" ]]; then
-        docker::check_images_existence "${images[@]}"
-    fi
-
-    local defer_sanity=false
-    for optional_config in "${@:4}"; do
-        if [[ -n $optional_config ]]; then
-            if [[ $optional_config == "defer-sanity" ]]; then
-                defer_sanity=true
-            else
-                docker_compose_param="$docker_compose_param -c ${DOCKER_COMPOSE_PATH}/$optional_config"
-            fi
-        fi
-    done
-
-    docker::prepare_config_digests "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /}
-    docker::ensure_external_networks_existence "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /}
-
-    try "docker stack deploy \
-        -c ${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE \
-        $docker_compose_param \
-        --with-registry-auth \
-        ${STACK_NAME}" \
-        throw \
-        "Wrong configuration in ${DOCKER_COMPOSE_PATH}/$DOCKER_COMPOSE_FILE or in the other supplied compose files"
-
-    docker::cleanup_stale_configs "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /}
-
-    if [[ $defer_sanity != true ]]; then
-        docker::deploy_sanity "$STACK_NAME" "$DOCKER_COMPOSE_PATH/$DOCKER_COMPOSE_FILE" ${docker_compose_param//-c /}
-    fi
-}
-
-# Deploys a config importer
-# Sets the config digests, deploys the config importer, removes it and removes the stale configs
-#
-# Arguments:
-# - $1 : stack name that the service falls under
-# - $2 : docker compose path (eg. /instant/monitoring/importer/docker-compose.config.yml)
-# - $3 : services name (eg. clickhouse-config-importer)
-# - $4 : config label (eg. clickhouse kibana)
-docker::deploy_config_importer() {
-    local -r STACK_NAME="${1:?$(missing_param "deploy_config_importer" "STACK_NAME")}"
-    local -r CONFIG_COMPOSE_PATH="${2:?$(missing_param "deploy_config_importer" "CONFIG_COMPOSE_PATH")}"
-    local -r SERVICE_NAME="${3:?$(missing_param "deploy_config_importer" "SERVICE_NAME")}"
-    local -r CONFIG_LABEL="${4:?$(missing_param "deploy_config_importer" "CONFIG_LABEL")}"
-
-    log info "Waiting for config importer $SERVICE_NAME to start ..."
-    (
-        if [[ ! -f "$CONFIG_COMPOSE_PATH" ]]; then
-            log error "No such file: $CONFIG_COMPOSE_PATH"
-            exit 1
-        fi
-
-        config::set_config_digests "$CONFIG_COMPOSE_PATH"
-
-        try \
-            "docker stack deploy -c ${CONFIG_COMPOSE_PATH} ${STACK_NAME}" \
-            throw \
-            "Wrong configuration in $CONFIG_COMPOSE_PATH"
-
-        log info "Waiting to give core config importer time to run before cleaning up service"
-
-        config::remove_config_importer "$STACK_NAME" "$SERVICE_NAME"
-        config::await_service_removed "$STACK_NAME" "$SERVICE_NAME"
-
-        log info "Removing stale configs..."
-        config::remove_stale_service_configs "$CONFIG_COMPOSE_PATH" "$CONFIG_LABEL"
-        overwrite "Removing stale configs... Done"
-    ) || {
-        log error "Failed to deploy the config importer: $SERVICE_NAME"
-        exit 1
-    }
-}
-
-# Checks for errors when deploying
-#
-# Arguments:
-# - $1 : stack name that the services falls under
-# - $@ : fully qualified path to the compose file(s) with service definitions (eg. /instant/interoperability-layer-openhim/docker-compose.yml)
-#
-docker::deploy_sanity() {
-    local -r STACK_NAME="${1:?$(missing_param "deploy_sanity" "STACK_NAME")}"
-    # shift off the stack name to get the subset of services to check
-    shift
-
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "deploy_sanity" "[COMPOSE_FILES]")"
-        exit 1
-    fi
-
-    local services=()
-    for compose_file in "$@"; do
-        # yq 'keys' returns:"- foo - bar" if you have yml with a foo: and bar: service definition
-        # which is why we remove the "- " before looping
-        # it will also return '#' as a key if you have a comment, so we clean them with ' ... comments="" ' first
-        local compose_services=$(yq '... comments="" | .services | keys' $compose_file)
-        compose_services=${compose_services//- /}
-        for service in ${compose_services[@]}; do
-            # only append unique service to services
-            if [[ ! ${services[*]} =~ $service ]]; then
-                services+=($service)
-            fi
-        done
-    done
-
-    for service_name in ${services[@]}; do
-        docker::await_service_status $STACK_NAME "$service_name" "Running"
-    done
-}
-
-# Scales services to the passed in replica count
-#
-# Arguments:
-# - $1 : stack name that the services falls under
-# - $2 : replicas number (eg. 0 (to scale down) or 1 (to scale up) or 2 (to scale up more))
-#
-docker::scale_services() {
-    local -r STACK_NAME="${1:?$(missing_param "scale_services" "STACK_NAME")}"
-    local -r REPLICAS="${2:?$(missing_param "scale_services" "REPLICAS")}"
-    local services=($(docker stack services $STACK_NAME | awk '{print $2}' | tail -n +2))
-    for service_name in "${services[@]}"; do
-        log info "Waiting for $service_name to scale to $REPLICAS ..."
-        try \
-            "docker service scale $service_name=$REPLICAS" \
-            catch \
-            "Failed to scale $service_name to $REPLICAS"
-        overwrite "Waiting for $service_name to scale to $REPLICAS ... Done"
-    done
-}
-
-# Checks if the external networks exist and tries to create them if they do not
-#
-# Arguments:
-# - $@ : fully qualified path to the docker compose file(s) with the possible network definitions (eg. /instant/interoperability-layer-openhim/docker-compose.yml)
-docker::ensure_external_networks_existence() {
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "ensure_external_networks_existence", "[COMPOSE_FILES]")"
-        exit 1
-    fi
-
-    for compose_file in "$@"; do
-        if [[ $(yq '.networks' $compose_file) == "null" ]]; then
-            continue
-        fi
-
-        local network_keys=$(yq '... comments="" | .networks | keys' $compose_file)
-        local networks=(${network_keys//- /})
-        if [[ "${networks[*]}" != "null" ]]; then
-            for network_name in "${networks[@]}"; do
-                # check if the property external is both present and set to true for the current network
-                # then pull the necessary properties to create the network
-                if [[ $(name=$network_name yq '.networks.[env(name)] | select(has("external")) | .external' $compose_file) == true ]]; then
-                    local name=$(name=$network_name yq '.networks.[env(name)] | .name' $compose_file)
-                    if [[ $name == "null" ]]; then
-                        name=$network_name
-                    fi
-
-                    # network with the name already exists so no need to create it
-                    if docker network ls | awk '{print $2}' | grep -q -w "$name"; then
-                        continue
-                    fi
-
-                    local driver=$(name=$network_name yq '.networks.[env(name)] | .driver' $compose_file)
-                    if [[ $driver == "null" ]]; then
-                        driver="overlay"
-                    fi
-
-                    local attachable=""
-                    if [[ $(name=$network_name yq '.networks.[env(name)] | .attachable' $compose_file) == true ]]; then
-                        attachable="--attachable"
-                    fi
-
-                    log info "Waiting to create external network $name ..."
-                    try \
-                        "docker network create --scope=swarm \
-                        -d $driver \
-                        $attachable \
-                        $name" \
-                        throw \
-                        "Failed to create network $name"
-                    overwrite "Waiting to create external network $name ... Done"
-                fi
-            done
-        fi
-    done
-}
-
-# Joins a service to a network by updating the service spec to include the network.
-#
-# Note: Do not remove if not used in the Platform as this is mainly used by
-# custom packages that cannot overwrite the docker compose file to add the network connection required.
-#
-# Arguments:
-# - $1 : service name that needs to join the network (eg. analytics-datastore-elastic-search)
-# - $2 : network name to join (eg. elastic_public)
-#
-docker::join_network() {
-    local -r SERVICE_NAME="${1:?$(missing_param "join_network" "SERVICE_NAME")}"
-    local -r NETWORK_NAME="${2:?$(missing_param "join_network" "NETWORK_NAME")}"
-    local network_id
-    network_id=$(docker network ls --filter name="$NETWORK_NAME$" --format '{{.ID}}')
-    if [[ -n "${network_id}" ]]; then
-        if docker service inspect "$SERVICE_NAME" --format "{{.Spec.TaskTemplate.Networks}}" | grep -q "$network_id"; then
-            log info "Service $SERVICE_NAME is already connected to network $NETWORK_NAME."
-        else
-            log info "Waiting to join $SERVICE_NAME to external network $NETWORK_NAME ..."
-            try \
-                "docker service update \
-                --network-add name=$NETWORK_NAME \
-                $SERVICE_NAME" \
-                throw \
-                "Failed to join network $NETWORK_NAME"
-        fi
-    else
-        log error "Network $NETWORK_NAME does not exist, cannot join $SERVICE_NAME to it ..."
-    fi
-}
-
-# Checks the compose file(s) passed in for the existance of a config.file definition to pass to config::set_config_digests
-#
-# Arguments:
-# - $@ : fully qualified path to the compose file(s) to check (eg. /instant/interoperability-layer-openhim/docker-compose.yml)
-docker::prepare_config_digests()
-{
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "prepare_config_digests", "[COMPOSE_FILES]")"
-        exit 1
-    fi
-
-    for compose_file in "$@"; do
-        local files=($(yq '.configs."*.*".file' "$compose_file"))
-        if [[ "${files[*]}" != "null" ]]; then
-            config::set_config_digests "$compose_file"
-        fi
-    done
-}
-
-# Checks the compose file(s) passed in for the existance of a config.lables.name definition to pass to config::remove_stale_service_configs
-# To ensure that the service has the most up to date config digest
-#
-# Arguments:
-# - $@ : fully qualified path to the compose file(s) to check (eg. /instant/interoperability-layer-openhim/docker-compose.yml)
-#
-docker::cleanup_stale_configs()
-{
-    if [[ -z "$*" ]]; then
-        log error "$(missing_param "cleanup_stale_configs", "[COMPOSE_FILES]")"
-        exit 1
-    fi
-
-    for compose_file in "$@"; do
-        local label_names=($(yq '.configs."*.*".labels.name' "$compose_file" | sort -u))
-        if [[ "${label_names[*]}" != "null" ]]; then
-            for label_name in "${label_names[@]}"; do
-                config::remove_stale_service_configs "$compose_file" "${label_name}"
-            done
-        fi
-    done
-}
diff --git a/utils/log.sh b/utils/log.sh
deleted file mode 100644
index 6a77bde9..00000000
--- a/utils/log.sh
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/bin/bash
-
-set -uo pipefail
-
-# Global constants
-PREV_LINE="\e[1A" # moves cursor to previous line
-CLEAR_LINE="\e[K" # clears the current line the cursor is on
-CLEAR_PREV_LINE="${PREV_LINE}${PREV_LINE}${CLEAR_LINE}"
-
-# Defaults
-DEBUG="${DEBUG:-0}"
-BASHLOG_FILE="${BASHLOG_FILE:-0}"
-
-root_log_file_path="/tmp/logs"
-LOG_FILE_PATH="${root_log_file_path}/${BASHLOG_FILE_PATH:-platform.log}"
-
-function _log_exception() {
-    (
-        BASHLOG_FILE=0
-        BASHLOG_JSON=0
-        BASHLOG_SYSLOG=0
-
-        log 'error' "Logging Exception: ${@}"
-    )
-}
-
-function log() {
-    local date_format="${BASHLOG_DATE_FORMAT:-+%F %T}"
-    local date="$(date "${date_format}")"
-    local date_s="$(date "+%s")"
-
-    local file="${BASHLOG_FILE:-0}"
-    local file_path="${LOG_FILE_PATH:-/tmp/$(basename "${0}").log}"
-
-    local json="${BASHLOG_JSON:-0}"
-    local json_path="${BASHLOG_JSON_PATH:-/tmp/$(basename "${0}").log.json}"
-
-    local syslog="${BASHLOG_SYSLOG:-0}"
-    local tag="${BASHLOG_SYSLOG_TAG:-$(basename "${0}")}"
-    local facility="${BASHLOG_SYSLOG_FACILITY:-local0}"
-    local pid="${$}"
-
-    local level="${1}"
-    local upper="$(echo "${level}" | awk '{print toupper($0)}')"
-    local debug_level="${DEBUG:-0}"
-
-    shift 1
-
-    local line="${@}"
-
-    # RFC 5424
-    #
-    # Numerical Severity
-    #   Code
-    #
-    # 0 Emergency: system is unusable
-    # 1 Alert: action must be taken immediately
-    # 2 Critical: critical conditions
-    # 3 Error: error conditions
-    # 4 Warning: warning conditions
-    # 5 Notice: normal but significant condition
-    # 6 Informational: informational messages
-    # 7 Debug: debug-level messages
-
-    local -A severities
-    severities['DEBUG']=7
-    severities['INFO']=6
-    severities['NOTICE']=5 # Unused
-    severities['WARN']=4
-    severities['ERROR']=3
-    severities['CRIT']=2 # Unused
-    severities['ALERT']=1 # Unused
-    severities['EMERG']=0 # Unused
-
-    local severity="${severities[${upper}]:-3}"
-
-    if [ "${debug_level}" -gt 0 ] || [ "${severity}" -lt 7 ]; then
-
-        if [ "${syslog}" -eq 1 ]; then
-            local syslog_line="${upper}: ${line}"
-
-            logger \
-                --id="${pid}" \
-                -t "${tag}" \
-                -p "${facility}.${severity}" \
-                "${syslog_line}" ||
-                _log_exception "logger --id=\"${pid}\" -t \"${tag}\" -p \"${facility}.${severity}\" \"${syslog_line}\""
-        fi
-
-        if [ "${file}" -eq 1 ]; then
-            clean_line="${line//\\e[1A/}"
-            clean_line="${clean_line//\\e[K/}"
-            local file_line="${date} [${upper}] ${clean_line}"
-            echo -e "${file_line}" >>"${file_path}" ||
-                _log_exception "echo -e \"${file_line}\" >> \"${file_path}\""
-        fi
-
-        if [ "${json}" -eq 1 ]; then
-            local json_line="$(printf '{"timestamp":"%s","level":"%s","message":"%s"}' "${date_s}" "${level}" "${line}")"
-            echo -e "${json_line}" >>"${json_path}" ||
-                _log_exception "echo -e \"${json_line}\" >> \"${json_path}\""
-        fi
-
-    fi
-
-    local -A colours
-    colours['DEBUG']='\033[34m'   # Blue
-    colours['INFO']='\033[32m'    # Green
-    colours['NOTICE']=''          # Unused
-    colours['WARN']='\033[33m'    # Yellow
-    colours['ERROR']='\033[31m'   # Red
-    colours['CRIT']=''            # Unused
-    colours['ALERT']=''           # Unused
-    colours['EMERG']=''           # Unused
-    colours['DEFAULT']='\033[0m'  # Default
-
-    local -A emoticons
-    emoticons['DEBUG']='🔷'
-    emoticons['INFO']='❕'
-    emoticons['NOTICE']='💡'
-    emoticons['WARN']='🔶'
-    emoticons['ERROR']='❌'
-    emoticons['CRIT']='⛔'
-    emoticons['ALERT']='❗❗'
-    emoticons['EMERG']='🚨'
-    emoticons['DEFAULT']=''
-
-    local norm="${colours['DEFAULT']}"
-    local colour="${colours[${upper}]:-\033[31m}"
-
-    if [[ "${line}" == *"${CLEAR_PREV_LINE}"* ]]; then
-        # Append package name dynamically when override
-        line="${CLEAR_PREV_LINE}[$(dirname -- "$0" | sed -e 's/-/ /g' -e 's/\b\(.\)/\u\1/g')] ${line#*"$CLEAR_PREV_LINE"}"
-    else
-        line="[$(dirname -- "$0" | sed -e 's/-/ /g' -e 's/\b\(.\)/\u\1/g')] ${line}"
-    fi
-
-    local std_line="${colour} ${emoticons[${upper}]} ${line}${norm}"
-
-    # Standard Output (Pretty)
-    case "${level}" in
-    'default' | 'info' | 'warn')
-        echo -e "${std_line}"
-        ;;
-    'debug')
-        if [ "${debug_level}" -gt 0 ]; then
-            echo -e "${std_line}"
-        fi
-        ;;
-    'error')
-        echo -e "${std_line}" >&2
-        ;;
-    *)
-        log 'error' "Undefined log level trying to log: ${@}"
-        ;;
-    esac
-}
-
-# This is an option if you want to log every single command executed,
-# but it will significantly impact script performance and unit tests will fail
-if [[ $DEBUG -eq 1 ]]; then
-    declare -g prev_cmd="null"
-    declare -g this_cmd="null"
-
-    trap 'prev_cmd=$this_cmd; this_cmd=$BASH_COMMAND; log debug $this_cmd' DEBUG
-fi
-
-# A function that will return a message called when of parameter not provided
-#
-# Arguments:
-# - $1 : optional - function name missing the parameter
-# - $2 : optional - name of the parameter missing
-missing_param() {
-    local FUNC_NAME=${1:-""}
-    local ARG_NAME=${2:-""}
-
-    echo "FATAL: ${FUNC_NAME} parameter ${ARG_NAME} not provided"
-}
-
-# Overwrites the last echo'd command with what is provided
-#
-# Arguments:
-# - $1 : message (eg. "Setting passwords... Done")
-overwrite() {
-    local -r MESSAGE=${1:?$(missing_param "overwrite")}
-    if [ "${DEBUG}" -eq 1 ]; then
-        log info "${MESSAGE}"
-    else
-        log info "${CLEAR_PREV_LINE}${MESSAGE}"
-    fi
-}
-
-# Execute a command handle logging of the output
-#
-# Arguments:
-# - $1 : command (eg. "docker service rm elastic-search")
-# - $2 : throw or catch (eg. "throw", "catch")
-# - $3 : error message (eg. "Failed to remove elastic-search service")
-try() {
-    local -r COMMAND=${1:?$(missing_param "try" "COMMAND")}
-    local -r SHOULD_THROW=${2:-"throw"}
-    local -r ERROR_MESSAGE=${3:?$(missing_param "try" "ERROR_MESSAGE")}
-
-    if [ "${BASHLOG_FILE}" -eq 1 ]; then
-        if ! eval "$COMMAND" >>"$LOG_FILE_PATH" 2>&1; then
-            log error "$ERROR_MESSAGE"
-            if [[ "$SHOULD_THROW" == "throw" ]]; then
-                exit 1
-            fi
-        fi
-    else
-        if [ "${DEBUG}" -eq 1 ]; then
-            if ! eval "$COMMAND"; then
-                log error "$ERROR_MESSAGE"
-                if [[ "$SHOULD_THROW" == "throw" ]]; then
-                    exit 1
-                fi
-            fi
-        else
-            if ! eval "$COMMAND" 1>/dev/null; then
-                log error "$ERROR_MESSAGE"
-                if [[ "$SHOULD_THROW" == "throw" ]]; then
-                    exit 1
-                fi
-            fi
-        fi
-    fi
-}
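For reviewers tracing how downstream packages consumed these helpers before their removal, a minimal usage sketch follows. It simply restates the usage examples embedded in the function comments above; the `$PATH_TO_FILE` location, the `instant` stack name, and the `instant_data-mapper-logstash` service name are illustrative placeholders taken from those comments, not paths or names defined by this change.

```bash
#!/bin/bash
# Illustrative only: source the (now removed) utility library from the project root;
# docker-utils.sh in turn sources config-utils.sh and log.sh.
. "$(pwd)/utils/docker-utils.sh"

# Deploy a package's compose file as a swarm stack; images are pulled and
# config digests set before `docker stack deploy` runs.
docker::deploy_service "instant" "$PATH_TO_FILE" "docker-compose.yml"

# Build up --config-rm/--config-add arguments for changed pipeline files,
# then apply them with a single service update (as documented in
# config::update_service_configs above).
service_update_args=""
config::update_service_configs service_update_args /usr/share/logstash/ "$PATH_TO_FILE"/pipeline cares
docker service update $service_update_args instant_data-mapper-logstash
```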