diff --git a/.env b/.env
new file mode 100644
index 0000000..13675d4
--- /dev/null
+++ b/.env
@@ -0,0 +1,3 @@
+COMPOSE_PROJECT_NAME=cheetah-infrastructure
+COMPOSE_PATH_SEPARATOR=:
+COMPOSE_FILE=docker-compose/es.yaml:docker-compose/kafka.yaml
diff --git a/config/elasticsearch/Dockerfile b/config/elasticsearch/Dockerfile
new file mode 100644
index 0000000..c524c7f
--- /dev/null
+++ b/config/elasticsearch/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.8-slim
+
+COPY requirements.txt /tmp/requirements.txt
+RUN pip install -r /tmp/requirements.txt
+
+COPY . /app
+
+WORKDIR /app
+RUN chmod +x /app/docker-entrypoint.sh
+
+CMD [ "python3", "apply_configuration.py", "es:9200", "--dev" ]
+ENTRYPOINT [ "bash", "/app/docker-entrypoint.sh" ]
diff --git a/config/elasticsearch/README.md b/config/elasticsearch/README.md
new file mode 100644
index 0000000..700b281
--- /dev/null
+++ b/config/elasticsearch/README.md
@@ -0,0 +1,16 @@
+# Elasticsearch Configuration
+To apply the configuration to `localhost:9200`:
+```
+python3 apply_configuration.py localhost:9200 --dev
+```
+
+Supply `--dev` as an argument to override the shard and replica settings.
+When overriding, `number_of_shards` is set to 1 and `number_of_replicas` is set to 0.
+
+Supply `--diff` as an argument to perform a dry run.
+This option only prints the differences in lifecycle policies, pipelines, index and component templates, and cluster settings that a real run would apply.
+
+For example, to apply the configuration to a remote cluster:
+```
+python3 apply_configuration.py https://elasticsearch.skagerak.trifork.dev:9200 --dev
+```
\ No newline at end of file
diff --git a/config/elasticsearch/apply_configuration.py b/config/elasticsearch/apply_configuration.py
new file mode 100644
index 0000000..a7f016a
--- /dev/null
+++ b/config/elasticsearch/apply_configuration.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python3
+import json
+import os
+import sys
+import difflib
+
+import requests
+
+if len(sys.argv) < 2:
+    print('Provide the ES host as an argument')
+
+    sys.exit(1)
+
+host = sys.argv[1]
+dev_mode = '--dev' in sys.argv[2:]
+diff_mode = '--diff' in sys.argv[2:]
+
+
+def get_difference(expected, actual):
+    return ''.join(difflib.unified_diff(expected.splitlines(keepends=True), actual.splitlines(keepends=True)))
+
+def print_difference_if_any(existing, new, message):
+    new_pretty = json.dumps(new, indent=2, sort_keys=True)
+    existing_pretty = json.dumps(existing, indent=2, sort_keys=True)
+
+    difference = get_difference(existing_pretty, new_pretty)
+    if difference:
+        print(f'\n{message}')
+
+        print(difference)
+
+def pack(parts, value):
+    if len(parts) == 0:
+        return value
+    elif len(parts) == 1:
+        return {parts[0]: value}
+    elif len(parts):
+        return {parts[0]: pack(parts[1:], str(value))}
+
+    return {}
+
+
+def merge_dicts(source, destination):
+    for key, value in source.items():
+        if isinstance(value, dict):
+            destination_node = destination.setdefault(key, {})
+            merge_dicts(value, destination_node)
+        else:
+            if key not in destination:
+                destination[key] = value
+
+
+def get_files(directory, extension):
+    return [file for file in os.listdir(directory) if file.endswith(extension)]
+
+
+def normalize(from_template, of_type):
+    template = from_template.copy()
+
+    # Normalize cluster settings
+    if of_type == 'cluster' and 'transient' not in from_template:
+        template['transient'] = {}
+
+    # Normalize lifecycle policy
+    if of_type == 'lifecycle' and 'policy' in from_template:
+        for phase in from_template.get('policy', {}).get('phases', {}).values():
+            # Set minimum age to 0 ms, if not specified
+            if 'min_age' not in phase:
+                phase['min_age'] = '0ms'
+
+            # Expand
implicit settings for ILM phases + phase_actions = phase.get('actions', {}) + if 'migrate' in phase_actions and not phase_actions.get('migrate', {}): + phase_actions['migrate'] = {'enabled': True} + + if 'delete' in phase_actions and not phase_actions.get('delete', {}): + phase_actions['delete'] = {'delete_searchable_snapshot': True} + + # Normalizes a new template with transformations done by ES s.t. diff can be made + if of_type == 'index' and 'composed_of' not in template: + template['composed_of'] = [] + + settings = template.get('template', {}).get('settings', {}) + if not settings: + return template + + stringify_fields = {'number_of_replicas', 'number_of_shards'} + for stringify_field in stringify_fields: + if stringify_field in settings: + settings[stringify_field] = str(settings[stringify_field]) + + # ES moves everything under settings to an index object + index_settings = settings.get('index', {}) + for setting, value in settings.copy().items(): + if setting == 'index': + continue + + if '.' in setting: + setting_parts = setting.split('.') + if setting_parts[0] == 'index': + setting_parts = setting_parts[1:] + + # ES expands paths (e.g., lifecycle.name) to dicts + root_part = setting_parts[0] + remaining_part = pack(setting_parts[1:], value) + if root_part in index_settings: + merge_dicts(index_settings[root_part], remaining_part) + index_settings[root_part] = remaining_part + else: + index_settings[setting] = value + + # Remove the moved setting + del settings[setting] + + settings['index'] = index_settings + + return template + + +def apply_lifecycles(): + base_dir = './lifecycle_policies' + + for file in get_files(base_dir, '.json'): + name = os.path.splitext(file)[0] + print(f'Applying {name} lifecycle policy...') + + with open(os.path.join(base_dir, file), 'r') as fp: + lifecycle_json = json.load(fp) + lifecycle_url = f'http://{host}/_ilm/policy/{name}_policy' + + if diff_mode: + existing_response = requests.get(lifecycle_url) + if existing_response.status_code != 200: + print(f'No existing lifecycle for {name}') + + return True + + existing_policy = existing_response.json().get(f'{name}_policy') + if not existing_policy: + print(f'Lifecycle {name} not found in response JSON') + + return True + + for key in {'in_use_by', 'version', 'modified_date'}: + if key in existing_policy: + del existing_policy[key] + + print_difference_if_any(existing_policy, normalize(lifecycle_json, 'lifecycle'), f'Lifecycle {name} has changes') + else: + res = requests.put(lifecycle_url, json=lifecycle_json) + + if res.status_code != 200: + print(f'Something went wrong with lifecycle policy {name}: {res.text}') + + return False + + return True + + +def apply_pipelines(): + base_dir = './pipelines' + + for file in get_files(base_dir, '.json'): + name = os.path.splitext(file)[0] + print(f'Applying {name} pipeline...') + + with open(os.path.join(base_dir, file), 'r') as fp: + pipeline_json = json.load(fp) + pipeline_url = f'http://{host}/_ingest/pipeline/{name}_pipeline' + + if diff_mode: + existing_response = requests.get(pipeline_url) + if existing_response.status_code != 200: + print(f'No existing pipeline for {name}') + + return True + + existing_pipeline = existing_response.json().get(f'{name}_pipeline') + if not existing_pipeline: + print(f'Pipeline {name} not found in response JSON') + + return True + + print_difference_if_any(existing_pipeline, normalize(pipeline_json, 'pipeline'), f'Pipeline {name} has changes') + else: + res = requests.put(pipeline_url, json=pipeline_json) + + if 
res.status_code != 200: + print(f'Something went wrong with pipeline {name}: {res.text}') + + return False + + return True + + +def apply_templates(of_type): + base_dir = f'./{of_type}_templates' + + for file in get_files(base_dir, '.json'): + name = os.path.splitext(file)[0] + print(f'Applying {name} template...') + + with open(os.path.join(base_dir, file), 'r') as fp: + template_json = json.load(fp) + if template_json and dev_mode: + template_settings = template_json.get('template', {}).get('settings', {}) + + if template_settings: + print(f'Overriding shard and replica settings for {name}...') + + template_settings['number_of_shards'] = '1' + template_settings['number_of_replicas'] = '0' + + template_url = f'http://{host}/_{of_type}_template/{name}_template' + if diff_mode: + existing_response = requests.get(template_url) + if existing_response.status_code != 200: + print(f'No existing template for {name}') + + return True + + existing_json = existing_response.json() + if existing_json: + template_list = existing_json.get(f'{of_type}_templates', []) + + if not template_list: + print(f'No {of_type} templates found in response from {name}') + + return True + + existing_template = template_list[-1].get(f'{of_type}_template') + + print_difference_if_any(existing_template, normalize(template_json, of_type), f'{of_type.capitalize()} template {name} has changes') + else: + res = requests.put(template_url, json=template_json) + + if res.status_code != 200: + print(f'Something went wrong with {of_type} template {name}: {res.text}') + + return False + + return True + + +def apply_cluster_settings(): + base_dir = f'./cluster' + url = f'http://{host}/_cluster/settings?flat_settings=true' + + for file in get_files(base_dir, '.json'): + name = os.path.splitext(file)[0] + print(f'Applying {name} cluster settings...') + + with open(os.path.join(base_dir, file), 'r') as fp: + cluster_json = json.load(fp) + + if diff_mode: + existing_response = requests.get(url) + if existing_response.status_code != 200: + print(f'No existing cluster settings found') + + return True + + print_difference_if_any(existing_response.json(), normalize(cluster_json, 'cluster'), f'Cluster settings {name} has changes') + else: + res = requests.put(url, json=cluster_json) + + if res.status_code != 200: + print(f'Something went wrong with applying cluster setting from {name}: {res.text}') + + return False + + +if __name__ == '__main__': + apply_lifecycles() and apply_pipelines() and apply_templates('component') and apply_templates('index') and apply_cluster_settings() diff --git a/config/elasticsearch/cluster/.keep b/config/elasticsearch/cluster/.keep new file mode 100644 index 0000000..e69de29 diff --git a/config/elasticsearch/component_templates/shared.json b/config/elasticsearch/component_templates/shared.json new file mode 100644 index 0000000..a14dc16 --- /dev/null +++ b/config/elasticsearch/component_templates/shared.json @@ -0,0 +1,17 @@ +{ + "template": { + "mappings": { + "properties": { + "timestamp": { + "type": "date" + }, + "customer": { + "type": "keyword" + }, + "uniqueId": { + "type": "keyword" + } + } + } + } +} diff --git a/config/elasticsearch/docker-entrypoint.sh b/config/elasticsearch/docker-entrypoint.sh new file mode 100644 index 0000000..e5ca249 --- /dev/null +++ b/config/elasticsearch/docker-entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +while [ true ] +do + echo "Command: $@" + $@ || true + echo "Sleeping 60" + sleep 60 +done diff --git a/config/elasticsearch/index_templates/otto-reading.json 
b/config/elasticsearch/index_templates/otto-reading.json new file mode 100644 index 0000000..6b9cead --- /dev/null +++ b/config/elasticsearch/index_templates/otto-reading.json @@ -0,0 +1,31 @@ +{ + "index_patterns": [ + "otto-reading*" + ], + "composed_of": [ + "shared_template" + ], + "template": { + "settings": { + "codec": "best_compression", + "number_of_replicas": 1, + "index.lifecycle.name": "otto-reading-policy", + "index.routing.allocation.include._tier_preference": "data_hot,data_warm" + }, + "mappings": { + "dynamic": "true", + "_source": { + "enabled": true + }, + "dynamic_templates": [], + "properties": { + "deviceId": { + "type": "keyword" + }, + "timestamp": { + "type": "date" + } + } + } + } +} \ No newline at end of file diff --git a/config/elasticsearch/index_templates/readingsummary.json b/config/elasticsearch/index_templates/readingsummary.json new file mode 100644 index 0000000..f6362a1 --- /dev/null +++ b/config/elasticsearch/index_templates/readingsummary.json @@ -0,0 +1,65 @@ +{ + "index_patterns": [ + "readingsummary_*" + ], + "template": { + "settings": { + "codec": "best_compression", + "number_of_replicas": 1, + "index.lifecycle.name": "readingsummary_policy", + "index.routing.allocation.include._tier_preference": "data_hot,data_warm" + }, + "mappings": { + "dynamic": "true", + "_source": { + "enabled": true + }, + "dynamic_templates": [], + "properties": { + "volumeStatistics": { + "properties": { + "min": { + "type": "float" + }, + "max": { + "type": "float" + }, + "mean": { + "type": "float" + }, + "count": { + "type": "long" + }, + "sum": { + "type": "float" + }, + "standardDeviation": { + "type": "float" + } + } + }, + "start": { + "type": "date" + }, + "end": { + "type": "date" + }, + "source": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "key": { + "type": "keyword" + }, + "uniqueId": { + "type": "keyword" + }, + "customer": { + "type": "keyword" + } + } + } + } +} \ No newline at end of file diff --git a/config/elasticsearch/index_templates/registeralerts.json b/config/elasticsearch/index_templates/registeralerts.json new file mode 100644 index 0000000..9ac82f4 --- /dev/null +++ b/config/elasticsearch/index_templates/registeralerts.json @@ -0,0 +1,28 @@ +{ + "index_patterns": [ + "registeralerts_*" + ], + "composed_of": [ + "shared_template" + ], + "template": { + "settings": { + "codec": "best_compression", + "number_of_replicas": 1, + "index.lifecycle.name": "shared_policy", + "index.routing.allocation.include._tier_preference": "data_hot,data_warm" + }, + "mappings": { + "dynamic": "true", + "_source": { + "enabled": true + }, + "dynamic_templates": [], + "properties": { + "alertGeneratedTimestamp": { + "type": "date" + } + } + } + } +} \ No newline at end of file diff --git a/config/elasticsearch/index_templates/watermeterreading.json b/config/elasticsearch/index_templates/watermeterreading.json new file mode 100644 index 0000000..ccd8725 --- /dev/null +++ b/config/elasticsearch/index_templates/watermeterreading.json @@ -0,0 +1,74 @@ +{ + "index_patterns": [ + "watermeterreading_*" + ], + "composed_of": [ + "shared_template" + ], + "template": { + "settings": { + "codec": "best_compression", + "number_of_replicas": 1, + "index.lifecycle.name": "watermeterreading_policy", + "index.routing.allocation.include._tier_preference": "data_hot,data_warm" + }, + "mappings": { + "dynamic": "true", + "_source": { + "enabled": true + }, + "dynamic_templates": [], + "properties": { + "imported": { + "type": "date" + }, + "importedCsvVersion": { 
+ "type": "keyword" + }, + "importedCsvFile": { + "type": "keyword" + }, + "volume1Logged": { + "type": "float" + }, + "serialNumber": { + "type": "keyword" + }, + "manufacture": { + "type": "keyword" + }, + "meterId": { + "type": "keyword" + }, + "acousticNoise": { + "type": "keyword" + }, + "infoCodes": { + "type": "long" + }, + "hourCounter": { + "type": "long" + }, + "volume1": { + "type": "float" + }, + "volume1Delta": { + "properties": { + "timestamp1": { + "type": "date" + }, + "timestamp0": { + "type": "date" + }, + "readingId0": { + "type": "keyword" + }, + "value": { + "type": "float" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/config/elasticsearch/lifecycle_policies/otto-reading.json b/config/elasticsearch/lifecycle_policies/otto-reading.json new file mode 100644 index 0000000..273be87 --- /dev/null +++ b/config/elasticsearch/lifecycle_policies/otto-reading.json @@ -0,0 +1,44 @@ +{ + "policy": { + "phases": { + "hot": { + "actions": { + "set_priority": { + "priority": 100 + } + } + }, + "warm": { + "min_age": "400d", + "actions": { + "migrate": {}, + "shrink": { + "number_of_shards": 1 + }, + "forcemerge": { + "max_num_segments": 1 + }, + "set_priority": { + "priority": 50 + } + } + }, + "cold": { + "min_age": "800d", + "actions": { + "migrate": {}, + "set_priority": { + "priority": 25 + }, + "readonly": {} + } + }, + "delete": { + "min_age": "3650d", + "actions": { + "delete": {} + } + } + } + } + } diff --git a/config/elasticsearch/lifecycle_policies/readingsummary.json b/config/elasticsearch/lifecycle_policies/readingsummary.json new file mode 100644 index 0000000..273be87 --- /dev/null +++ b/config/elasticsearch/lifecycle_policies/readingsummary.json @@ -0,0 +1,44 @@ +{ + "policy": { + "phases": { + "hot": { + "actions": { + "set_priority": { + "priority": 100 + } + } + }, + "warm": { + "min_age": "400d", + "actions": { + "migrate": {}, + "shrink": { + "number_of_shards": 1 + }, + "forcemerge": { + "max_num_segments": 1 + }, + "set_priority": { + "priority": 50 + } + } + }, + "cold": { + "min_age": "800d", + "actions": { + "migrate": {}, + "set_priority": { + "priority": 25 + }, + "readonly": {} + } + }, + "delete": { + "min_age": "3650d", + "actions": { + "delete": {} + } + } + } + } + } diff --git a/config/elasticsearch/lifecycle_policies/shared.json b/config/elasticsearch/lifecycle_policies/shared.json new file mode 100644 index 0000000..273be87 --- /dev/null +++ b/config/elasticsearch/lifecycle_policies/shared.json @@ -0,0 +1,44 @@ +{ + "policy": { + "phases": { + "hot": { + "actions": { + "set_priority": { + "priority": 100 + } + } + }, + "warm": { + "min_age": "400d", + "actions": { + "migrate": {}, + "shrink": { + "number_of_shards": 1 + }, + "forcemerge": { + "max_num_segments": 1 + }, + "set_priority": { + "priority": 50 + } + } + }, + "cold": { + "min_age": "800d", + "actions": { + "migrate": {}, + "set_priority": { + "priority": 25 + }, + "readonly": {} + } + }, + "delete": { + "min_age": "3650d", + "actions": { + "delete": {} + } + } + } + } + } diff --git a/config/elasticsearch/lifecycle_policies/watermeterreading.json b/config/elasticsearch/lifecycle_policies/watermeterreading.json new file mode 100644 index 0000000..273be87 --- /dev/null +++ b/config/elasticsearch/lifecycle_policies/watermeterreading.json @@ -0,0 +1,44 @@ +{ + "policy": { + "phases": { + "hot": { + "actions": { + "set_priority": { + "priority": 100 + } + } + }, + "warm": { + "min_age": "400d", + "actions": { + "migrate": {}, + "shrink": { + "number_of_shards": 1 + }, + 
"forcemerge": { + "max_num_segments": 1 + }, + "set_priority": { + "priority": 50 + } + } + }, + "cold": { + "min_age": "800d", + "actions": { + "migrate": {}, + "set_priority": { + "priority": 25 + }, + "readonly": {} + } + }, + "delete": { + "min_age": "3650d", + "actions": { + "delete": {} + } + } + } + } + } diff --git a/config/elasticsearch/pipelines/.keep b/config/elasticsearch/pipelines/.keep new file mode 100644 index 0000000..e69de29 diff --git a/config/elasticsearch/requirements.txt b/config/elasticsearch/requirements.txt new file mode 100644 index 0000000..67fe0da --- /dev/null +++ b/config/elasticsearch/requirements.txt @@ -0,0 +1,3 @@ +requests +elasticsearch + diff --git a/config/jobs/log4j.properties b/config/jobs/log4j.properties new file mode 100644 index 0000000..c34deda --- /dev/null +++ b/config/jobs/log4j.properties @@ -0,0 +1,51 @@ +# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds. +monitorInterval=30 + +# This affects logging for both user code and Flink +rootLogger.level = DEBUG +rootLogger.appenderRef.console.ref = ConsoleAppender +rootLogger.appenderRef.rolling.ref = RollingFileAppender + +# Uncomment this if you want to _only_ change Flink's logging +#logger.flink.name = org.apache.flink +#logger.flink.level = INFO + +# The following lines keep the log level of common libraries/connectors on +# log level INFO. The root logger does not override this. You have to manually +# change the log levels here. +logger.akka.name = akka +logger.akka.level = INFO +logger.kafka.name= org.apache.kafka +logger.kafka.level = INFO +logger.hadoop.name = org.apache.hadoop +logger.hadoop.level = INFO +logger.zookeeper.name = org.apache.zookeeper +logger.zookeeper.level = INFO +logger.shaded_zookeeper.name = org.apache.flink.shaded.zookeeper3 +logger.shaded_zookeeper.level = INFO + +# Log all infos to the console +appender.console.name = ConsoleAppender +appender.console.type = CONSOLE +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n + +# Log all infos in the given rolling file +appender.rolling.name = RollingFileAppender +appender.rolling.type = RollingFile +appender.rolling.append = true +appender.rolling.fileName = ${sys:log.file} +appender.rolling.filePattern = ${sys:log.file}.%i +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n +appender.rolling.policies.type = Policies +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=100MB +appender.rolling.policies.startup.type = OnStartupTriggeringPolicy +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10} + +# Suppress the irrelevant (wrong) warnings from the Netty channel handler +logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline +logger.netty.level = OFF +# \ No newline at end of file diff --git a/config/kafkaconfig.sh b/config/kafkaconfig.sh new file mode 100644 index 0000000..f563ab7 --- /dev/null +++ b/config/kafkaconfig.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +echo "Waiting for Kafka to be ready..." 
+cub kafka-ready -b kafka:19092 1 20 #expected_brokers timeout_seconds
+
+echo "Creating topics with retention set to 3 years"
+for topic in OttoReadingMovingAverage OttoReading; do
+    kafka-topics --create --if-not-exists --bootstrap-server kafka:19092 --partitions 1 --replication-factor 1 --topic $topic --config retention.ms=94608000000
+    kafka-configs --bootstrap-server kafka:19092 --entity-type topics --entity-name $topic --alter --add-config retention.ms=94608000000
+done
+echo "Creating topics done"
+
+# Alter in case they already existed
+## Options:
+#cleanup.policy
+#compression.type
+#delete.retention.ms
+#file.delete.delay.ms
+#flush.messages
+#flush.ms
+#follower.replication.throttled.replicas
+#index.interval.bytes
+#leader.replication.throttled.replicas
+#max.compaction.lag.ms
+#max.message.bytes
+#message.downconversion.enable
+#message.format.version
+#message.timestamp.difference.max.ms
+#message.timestamp.type
+#min.cleanable.dirty.ratio
+#min.compaction.lag.ms
+#min.insync.replicas
+#preallocate
+#retention.bytes
+#retention.ms
+#segment.bytes
+#segment.index.bytes
+#segment.jitter.ms
+#segment.ms
+#unclean.leader.election.enable
diff --git a/config/kowl.yaml b/config/kowl.yaml
new file mode 100644
index 0000000..6b70c61
--- /dev/null
+++ b/config/kowl.yaml
@@ -0,0 +1,7 @@
+# See: https://github.com/cloudhut/kowl/tree/master/docs/config for reference config files.
+kafka:
+  brokers:
+    - kafka:19092
+
+# server:
+  # listenPort: 8080
\ No newline at end of file
diff --git a/config/mqtt/mosquitto.conf b/config/mqtt/mosquitto.conf
new file mode 100644
index 0000000..3b29416
--- /dev/null
+++ b/config/mqtt/mosquitto.conf
@@ -0,0 +1,6 @@
+persistence true
+persistence_location /mosquitto/data/
+allow_anonymous true
+listener 1883
+
+#log_dest file /mosquitto/log/mosquitto.log
diff --git a/docker-compose/es.yaml b/docker-compose/es.yaml
new file mode 100644
index 0000000..81f2b92
--- /dev/null
+++ b/docker-compose/es.yaml
@@ -0,0 +1,48 @@
+version: '2.1'
+
+services:
+
+  es:
+    image: elasticsearch:8.4.1
+    hostname: es
+    environment:
+      - node.name=es01
+      - discovery.type=single-node
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms2g -Xmx2g -Dlog4j2.formatMsgNoLookups"
+      - "xpack.security.enabled=false"
+      - "xpack.license.self_generated.type=basic"
+      - ELASTIC_PASSWORD=d6MR9LN8kwpqlZClVuP3WYxfZiuOEOEN
+    ports:
+      - 9200:9200
+    volumes:
+      - es:/usr/share/elasticsearch/data
+    restart: unless-stopped
+
+  es-cfg:
+    build:
+      context: ../config/elasticsearch
+      dockerfile: Dockerfile
+    depends_on:
+      - es
+    restart: unless-stopped
+    profiles:
+      - donotstart
+
+
+  kibana:
+    image: docker.elastic.co/kibana/kibana:8.4.3
+    hostname: kibana
+    ports:
+      - 5601:5601
+    environment:
+      ELASTICSEARCH_HOSTS: '["http://es:9200"]'
+    restart: unless-stopped
+
+volumes:
+  es:
+
+networks:
+  default:
+    name: "cheetah-infrastructure"
+    external: true
\ No newline at end of file
diff --git a/docker-compose/kafka.yaml b/docker-compose/kafka.yaml
new file mode 100644
index 0000000..e985c3f
--- /dev/null
+++ b/docker-compose/kafka.yaml
@@ -0,0 +1,85 @@
+version: '2.1'
+
+services:
+
+  zoo:
+    image: confluentinc/cp-zookeeper:7.2.1
+    hostname: zoo
+    ports:
+      - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+    #volumes:
+    # - ./zk-single-kafka-single/zoo/data:/data
+    # - ./zk-single-kafka-single/zoo/datalog:/datalog
+    restart: unless-stopped
+
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    ports:
+      - "9092:9092"
+    environment:
+      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka:19092,LISTENER_DOCKER_EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT + KAFKA_ZOOKEEPER_CONNECT: "zoo:2181" + KAFKA_BROKER_ID: 1 + KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_DELETE_TOPIC_ENABLE: "true" + # volumes: + #- ./zk-single-kafka-single/kafka/data:/var/lib/kafka/data + depends_on: + - zoo + - kafka-setup + restart: unless-stopped + + # This "container" is a workaround to pre-create topics + kafka-setup: + image: confluentinc/cp-kafka:7.2.1 + hostname: kafka-setup + container_name: kafka-setup + volumes: + - ../config/kafkaconfig.sh:/etc/config/kafkaconfig.sh + command: "bash /etc/config/kafkaconfig.sh" + restart: on-failure + environment: + # The following settings are listed here only to satisfy the image's requirements. + # We override the image's `command` anyways, hence this container will not start a broker. + KAFKA_BROKER_ID: ignored + KAFKA_ZOOKEEPER_CONNECT: ignored + profiles: + - donotstart + + kowl: + image: quay.io/cloudhut/kowl:latest + hostname: kowl + restart: on-failure + volumes: + - ../config/kowl.yaml:/etc/kowl/config.yaml + ports: + - "8080:8080" + entrypoint: ./kowl --config.filepath=/etc/kowl/config.yaml + depends_on: + - kafka + + # Kafka Prometheus exporter https://github.com/cloudhut/kminion + kafka-minion: + image: quay.io/cloudhut/kminion:master + hostname: kafka-minion + container_name: kafka-minion + depends_on: + - zoo + - kafka + ports: + - 8088:8080 + environment: + KAFKA_BROKERS: kafka:19092 + restart: unless-stopped + profiles: + - donotstart + +networks: + default: + name: "cheetah-infrastructure"
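Usage sketch (assumes Docker Compose v2; the service and profile names `es-cfg`, `kafka-setup`, and `donotstart` come from the compose files above, while the exact commands are illustrative rather than part of this change):

```
# Start the core services; COMPOSE_FILE in .env already selects
# docker-compose/es.yaml and docker-compose/kafka.yaml
docker compose up -d

# Run the one-off helpers parked behind the "donotstart" profile:
# kafka-setup pre-creates the Kafka topics, es-cfg applies the Elasticsearch configuration
docker compose --profile donotstart up -d kafka-setup es-cfg

# Alternatively, apply the Elasticsearch configuration directly from the host:
# first a dry run that only prints differences, then a real run with the dev overrides
cd config/elasticsearch
python3 apply_configuration.py localhost:9200 --diff
python3 apply_configuration.py localhost:9200 --dev
```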