diff --git a/examples/tracetest-signoz/.gitignore b/examples/tracetest-signoz/.gitignore
deleted file mode 100644
index 5fbc35fe06..0000000000
--- a/examples/tracetest-signoz/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-signoz/data/alertmanager/*
-signoz/data/clickhouse/*
-signoz/data/signoz/*
-signoz/data/zookeeper-1/*
diff --git a/examples/tracetest-signoz/README.md b/examples/tracetest-signoz/README.md
deleted file mode 100644
index b7227737e1..0000000000
--- a/examples/tracetest-signoz/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Tracetest + SigNoz
-
-This repository's objective is to show how you can configure your Tracetest instance to connect to SigNoz and use it as its tracing backend.
-
-## Steps
-
-1. [Install the tracetest CLI](https://docs.tracetest.io/installing/)
-2. Run `tracetest configure --server-url http://localhost:11633` in a terminal
-3. Start the project with Docker Compose: `docker compose up` (or `docker-compose up` if you are still on Compose v1)
-4. Test that it works by running `tracetest test run -d tests/list-tests.yaml`. This triggers a test that sends spans to, and retrieves them from, the SigNoz instance running on your machine.
diff --git a/examples/tracetest-signoz/docker-compose.yml b/examples/tracetest-signoz/docker-compose.yml
deleted file mode 100644
index 8602e24e4d..0000000000
--- a/examples/tracetest-signoz/docker-compose.yml
+++ /dev/null
@@ -1,174 +0,0 @@
-version: '3'
-services:
- tracetest:
- image: kubeshop/tracetest:${TAG:-latest}
- platform: linux/amd64
- volumes:
- - type: bind
- source: ./tracetest/tracetest-config.yaml
- target: /app/tracetest.yaml
- - type: bind
- source: ./tracetest/tracetest-provision.yaml
- target: /app/provision.yaml
- command: --provisioning-file /app/provision.yaml
- ports:
- - 11633:11633
- extra_hosts:
- - "host.docker.internal:host-gateway"
- depends_on:
- postgres:
- condition: service_healthy
- otel-collector:
- condition: service_started
- healthcheck:
- test: [ "CMD", "wget", "--spider", "localhost:11633" ]
- interval: 1s
- timeout: 3s
- retries: 60
- environment:
- TRACETEST_DEV: ${TRACETEST_DEV}
-
- postgres:
- image: postgres:14
- environment:
- POSTGRES_PASSWORD: postgres
- POSTGRES_USER: postgres
- healthcheck:
- test: pg_isready -U "$$POSTGRES_USER" -d "$$POSTGRES_DB"
- interval: 1s
- timeout: 5s
- retries: 60
-
- otel-collector:
- image: otel/opentelemetry-collector:0.54.0
- command:
- - "--config"
- - "/otel-local-config.yaml"
- volumes:
- - ./tracetest/collector.config.yaml:/otel-local-config.yaml
- ports:
- - 4317:4317
- depends_on:
- signoz-otel-collector:
- condition: service_started
- signoz-otel-collector-metrics:
- condition: service_started
-
- ###################################################################################################################################################################################################
- # SigNoz setup
- ###################################################################################################################################################################################################
- zookeeper-1:
- image: bitnami/zookeeper:3.7.1
- container_name: zookeeper-1
- hostname: zookeeper-1
- user: root
- volumes:
- - ./signoz/data/zookeeper-1:/bitnami/zookeeper
- environment:
- - ZOO_SERVER_ID=1
- - ALLOW_ANONYMOUS_LOGIN=yes
- - ZOO_AUTOPURGE_INTERVAL=1
-
- clickhouse:
- restart: on-failure
- image: clickhouse/clickhouse-server:22.8.8-alpine
- tty: true
- depends_on:
- - zookeeper-1
- logging:
- options:
- max-size: 50m
- max-file: "3"
- healthcheck:
- test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
- interval: 30s
- timeout: 5s
- retries: 3
- ulimits:
- nproc: 65535
- nofile:
- soft: 262144
- hard: 262144
- container_name: clickhouse
- hostname: clickhouse
- volumes:
- - ./signoz/clickhouse-config.xml:/etc/clickhouse-server/config.xml
- - ./signoz/clickhouse-users.xml:/etc/clickhouse-server/users.xml
- - ./signoz/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- - ./signoz/clickhouse-cluster.xml:/etc/clickhouse-server/config.d/cluster.xml
- - ./signoz/data/clickhouse/:/var/lib/clickhouse/
- - ./signoz/user_scripts:/var/lib/clickhouse/user_scripts/
-
- alertmanager:
- image: signoz/alertmanager:${ALERTMANAGER_TAG:-0.23.1}
- volumes:
- - ./signoz/data/alertmanager:/data
- depends_on:
- query-service:
- condition: service_healthy
- restart: on-failure
- command:
- - --queryService.url=http://query-service:8085
- - --storage.path=/data
-
- query-service:
- image: signoz/query-service:${DOCKER_TAG:-0.22.0}
- command: ["-config=/root/config/prometheus.yml"]
- volumes:
- - ./signoz/prometheus.yml:/root/config/prometheus.yml
- - ./signoz/data/signoz/:/var/lib/signoz/
- environment:
- - ClickHouseUrl=tcp://clickhouse:9000/?database=signoz_traces
- - ALERTMANAGER_API_PREFIX=http://alertmanager:9093/api/
- - SIGNOZ_LOCAL_DB_PATH=/var/lib/signoz/signoz.db
- - DASHBOARDS_PATH=/root/config/dashboards
- - STORAGE=clickhouse
- - GODEBUG=netdns=go
- - TELEMETRY_ENABLED=true
- - DEPLOYMENT_TYPE=docker-standalone-amd
- restart: on-failure
- healthcheck:
- test: ["CMD", "wget", "--spider", "-q", "localhost:8080/api/v1/health"]
- interval: 30s
- timeout: 5s
- retries: 3
- depends_on:
- clickhouse:
- condition: service_healthy
-
- frontend:
- image: signoz/frontend:${DOCKER_TAG:-0.22.0}
- restart: on-failure
- depends_on:
- - alertmanager
- - query-service
- ports:
- - 3301:3301
- volumes:
- - ./signoz/common/nginx-config.conf:/etc/nginx/conf.d/default.conf
-
- signoz-otel-collector:
- image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.2}
- command: ["--config=/etc/otel-collector-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- user: root # required for reading docker container logs
- volumes:
- - ./signoz/otel-collector-config.yaml:/etc/otel-collector-config.yaml
- - /var/lib/docker/containers:/var/lib/docker/containers:ro
- environment:
- - OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- - DOCKER_MULTI_NODE_CLUSTER=false
- - LOW_CARDINAL_EXCEPTION_GROUPING=false
- restart: on-failure
- depends_on:
- clickhouse:
- condition: service_healthy
-
- signoz-otel-collector-metrics:
- image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.79.2}
- command: ["--config=/etc/otel-collector-metrics-config.yaml", "--feature-gates=-pkg.translator.prometheus.NormalizeName"]
- volumes:
- - ./signoz/otel-collector-metrics-config.yaml:/etc/otel-collector-metrics-config.yaml
- restart: on-failure
- depends_on:
- clickhouse:
- condition: service_healthy
diff --git a/examples/tracetest-signoz/signoz/alertmanager.yml b/examples/tracetest-signoz/signoz/alertmanager.yml
deleted file mode 100644
index d69357f9dd..0000000000
--- a/examples/tracetest-signoz/signoz/alertmanager.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-global:
- resolve_timeout: 1m
- slack_api_url: 'https://hooks.slack.com/services/xxx'
-
-route:
- receiver: 'slack-notifications'
-
-receivers:
-- name: 'slack-notifications'
- slack_configs:
- - channel: '#alerts'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
-
- *Description:* {{ .Annotations.description }}
-
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
\ No newline at end of file
diff --git a/examples/tracetest-signoz/signoz/alerts.yml b/examples/tracetest-signoz/signoz/alerts.yml
deleted file mode 100644
index 810a20750c..0000000000
--- a/examples/tracetest-signoz/signoz/alerts.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-groups:
-- name: ExampleCPULoadGroup
- rules:
- - alert: HighCpuLoad
- expr: system_cpu_load_average_1m > 0.1
- for: 0m
- labels:
- severity: warning
- annotations:
- summary: High CPU load
- description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/examples/tracetest-signoz/signoz/clickhouse-cluster.xml b/examples/tracetest-signoz/signoz/clickhouse-cluster.xml
deleted file mode 100644
index 0e3ddcdde0..0000000000
--- a/examples/tracetest-signoz/signoz/clickhouse-cluster.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <zookeeper>
-        <node index="1">
-            <host>zookeeper-1</host>
-            <port>2181</port>
-        </node>
-    </zookeeper>
-
-    <remote_servers>
-        <cluster>
-            <shard>
-                <replica>
-                    <host>clickhouse</host>
-                    <port>9000</port>
-                </replica>
-            </shard>
-        </cluster>
-    </remote_servers>
-</clickhouse>
\ No newline at end of file
diff --git a/examples/tracetest-signoz/signoz/clickhouse-config.xml b/examples/tracetest-signoz/signoz/clickhouse-config.xml
deleted file mode 100644
index f8213b6521..0000000000
--- a/examples/tracetest-signoz/signoz/clickhouse-config.xml
+++ /dev/null
@@ -1,1140 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <logger>
-        <level>information</level>
-        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
-        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
-        <size>1000M</size>
-        <count>10</count>
-    </logger>
-
-    <http_port>8123</http_port>
-    <tcp_port>9000</tcp_port>
-    <mysql_port>9004</mysql_port>
-    <postgresql_port>9005</postgresql_port>
-    <interserver_http_port>9009</interserver_http_port>
-
-    <max_connections>4096</max_connections>
-    <keep_alive_timeout>3</keep_alive_timeout>
-
-    <grpc>
-        <enable_ssl>false</enable_ssl>
-        <ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
-        <ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
-        <ssl_require_client_auth>false</ssl_require_client_auth>
-        <ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
-        <transport_compression_type>none</transport_compression_type>
-        <transport_compression_level>0</transport_compression_level>
-        <max_send_message_size>-1</max_send_message_size>
-        <max_receive_message_size>-1</max_receive_message_size>
-        <verbose_logs>false</verbose_logs>
-    </grpc>
-
-    <openSSL>
-        <server>
-            <verificationMode>none</verificationMode>
-            <loadDefaultCAFile>true</loadDefaultCAFile>
-            <cacheSessions>true</cacheSessions>
-            <disableProtocols>sslv2,sslv3</disableProtocols>
-            <preferServerCiphers>true</preferServerCiphers>
-        </server>
-        <client>
-            <loadDefaultCAFile>true</loadDefaultCAFile>
-            <cacheSessions>true</cacheSessions>
-            <disableProtocols>sslv2,sslv3</disableProtocols>
-            <preferServerCiphers>true</preferServerCiphers>
-            <invalidCertificateHandler>
-                <name>RejectCertificateHandler</name>
-            </invalidCertificateHandler>
-        </client>
-    </openSSL>
-
-    <max_concurrent_queries>100</max_concurrent_queries>
-    <max_server_memory_usage>0</max_server_memory_usage>
-    <max_thread_pool_size>10000</max_thread_pool_size>
-    <max_server_memory_usage_to_ram_ratio>0.9</max_server_memory_usage_to_ram_ratio>
-    <total_memory_profiler_step>4194304</total_memory_profiler_step>
-    <total_memory_tracker_sample_probability>0</total_memory_tracker_sample_probability>
-
-    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
-    <mark_cache_size>5368709120</mark_cache_size>
-    <mmap_cache_size>1000</mmap_cache_size>
-    <compiled_expression_cache_size>134217728</compiled_expression_cache_size>
-    <compiled_expression_cache_elements_size>10000</compiled_expression_cache_elements_size>
-
-    <path>/var/lib/clickhouse/</path>
-    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
-    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
-
-    <user_directories>
-        <users_xml>
-            <path>users.xml</path>
-        </users_xml>
-        <local_directory>
-            <path>/var/lib/clickhouse/access/</path>
-        </local_directory>
-    </user_directories>
-
-    <default_profile>default</default_profile>
-    <default_database>default</default_database>
-
-    <mlock_executable>true</mlock_executable>
-    <remap_executable>false</remap_executable>
-
-    <!-- To query external JDBC data sources, install and run clickhouse-jdbc-bridge.
-         Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information. -->
-
-    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
-    <max_session_timeout>3600</max_session_timeout>
-    <default_session_timeout>60</default_session_timeout>
-
-    <query_log>
-        <database>system</database>
-        <table>query_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </query_log>
-
-    <trace_log>
-        <database>system</database>
-        <table>trace_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </trace_log>
-
-    <query_thread_log>
-        <database>system</database>
-        <table>query_thread_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </query_thread_log>
-
-    <query_views_log>
-        <database>system</database>
-        <table>query_views_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </query_views_log>
-
-    <part_log>
-        <database>system</database>
-        <table>part_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </part_log>
-
-    <metric_log>
-        <database>system</database>
-        <table>metric_log</table>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
-    </metric_log>
-
-    <asynchronous_metric_log>
-        <database>system</database>
-        <table>asynchronous_metric_log</table>
-        <flush_interval_milliseconds>7000</flush_interval_milliseconds>
-    </asynchronous_metric_log>
-
-    <opentelemetry_span_log>
-        <engine>
-            engine MergeTree
-            partition by toYYYYMM(finish_date)
-            order by (finish_date, finish_time_us, trace_id)
-        </engine>
-        <database>system</database>
-        <table>opentelemetry_span_log</table>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </opentelemetry_span_log>
-
-    <crash_log>
-        <database>system</database>
-        <table>crash_log</table>
-        <flush_interval_milliseconds>1000</flush_interval_milliseconds>
-    </crash_log>
-
-    <session_log>
-        <database>system</database>
-        <table>session_log</table>
-        <partition_by>toYYYYMM(event_date)</partition_by>
-        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-    </session_log>
-
-    <dictionaries_config>*_dictionary.xml</dictionaries_config>
-    <user_defined_executable_functions_config>*function.xml</user_defined_executable_functions_config>
-    <user_scripts_path>/var/lib/clickhouse/user_scripts/</user_scripts_path>
-
-    <distributed_ddl>
-        <path>/clickhouse/task_queue/ddl</path>
-    </distributed_ddl>
-
-    <graphite_rollup_example>
-        <pattern>
-            <regexp>click_cost</regexp>
-            <function>any</function>
-            <retention>
-                <age>0</age>
-                <precision>3600</precision>
-            </retention>
-            <retention>
-                <age>86400</age>
-                <precision>60</precision>
-            </retention>
-        </pattern>
-        <default>
-            <function>max</function>
-            <retention>
-                <age>0</age>
-                <precision>60</precision>
-            </retention>
-            <retention>
-                <age>3600</age>
-                <precision>300</precision>
-            </retention>
-            <retention>
-                <age>86400</age>
-                <precision>3600</precision>
-            </retention>
-        </default>
-    </graphite_rollup_example>
-
-    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
-
-    <query_masking_rules>
-        <rule>
-            <name>hide encrypt/decrypt arguments</name>
-            <regexp>((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:'(?:\\'|.)+'|.*?)\s*\)</regexp>
-            <replace>\1(???)</replace>
-        </rule>
-    </query_masking_rules>
-
-    <send_crash_reports>
-        <enabled>false</enabled>
-        <anonymize>false</anonymize>
-        <endpoint>https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277</endpoint>
-    </send_crash_reports>
-
-    <merge_tree_metadata_cache>
-        <lru_cache_size>268435456</lru_cache_size>
-        <continue_if_corrupted>true</continue_if_corrupted>
-    </merge_tree_metadata_cache>
-</clickhouse>
diff --git a/examples/tracetest-signoz/signoz/clickhouse-storage.xml b/examples/tracetest-signoz/signoz/clickhouse-storage.xml
deleted file mode 100644
index 54ec4976f5..0000000000
--- a/examples/tracetest-signoz/signoz/clickhouse-storage.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <storage_configuration>
-        <disks>
-            <default>
-                <keep_free_space_bytes>10485760</keep_free_space_bytes>
-            </default>
-            <s3>
-                <type>s3</type>
-                <endpoint>https://BUCKET-NAME.s3-REGION-NAME.amazonaws.com/data/</endpoint>
-                <access_key_id>ACCESS-KEY-ID</access_key_id>
-                <secret_access_key>SECRET-ACCESS-KEY</secret_access_key>
-            </s3>
-        </disks>
-        <policies>
-            <tiered>
-                <volumes>
-                    <default>
-                        <disk>default</disk>
-                    </default>
-                    <s3>
-                        <disk>s3</disk>
-                        <perform_ttl_move_on_insert>0</perform_ttl_move_on_insert>
-                    </s3>
-                </volumes>
-            </tiered>
-        </policies>
-    </storage_configuration>
-</clickhouse>
diff --git a/examples/tracetest-signoz/signoz/clickhouse-users.xml b/examples/tracetest-signoz/signoz/clickhouse-users.xml
deleted file mode 100644
index f18562071d..0000000000
--- a/examples/tracetest-signoz/signoz/clickhouse-users.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<clickhouse>
-    <profiles>
-        <default>
-            <max_memory_usage>10000000000</max_memory_usage>
-            <load_balancing>random</load_balancing>
-        </default>
-        <readonly>
-            <readonly>1</readonly>
-        </readonly>
-    </profiles>
-
-    <users>
-        <default>
-            <password></password>
-            <networks>
-                <ip>::/0</ip>
-            </networks>
-            <profile>default</profile>
-            <quota>default</quota>
-        </default>
-    </users>
-
-    <quotas>
-        <default>
-            <interval>
-                <duration>3600</duration>
-                <queries>0</queries>
-                <errors>0</errors>
-                <result_rows>0</result_rows>
-                <read_rows>0</read_rows>
-                <execution_time>0</execution_time>
-            </interval>
-        </default>
-    </quotas>
-</clickhouse>
diff --git a/examples/tracetest-signoz/signoz/common/nginx-config.conf b/examples/tracetest-signoz/signoz/common/nginx-config.conf
deleted file mode 100644
index a8673496a2..0000000000
--- a/examples/tracetest-signoz/signoz/common/nginx-config.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-server {
- listen 3301;
- server_name _;
-
- gzip on;
- gzip_static on;
- gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
- gzip_proxied any;
- gzip_vary on;
- gzip_comp_level 6;
- gzip_buffers 16 8k;
- gzip_http_version 1.1;
-
- # to handle uri issue 414 from nginx
- client_max_body_size 24M;
- large_client_header_buffers 8 128k;
-
- location / {
- if ( $uri = '/index.html' ) {
- add_header Cache-Control no-store always;
- }
- root /usr/share/nginx/html;
- index index.html index.htm;
- try_files $uri $uri/ /index.html;
- }
-
- location /api/alertmanager {
- proxy_pass http://alertmanager:9093/api/v2;
- }
-
- location /api {
- proxy_pass http://query-service:8080/api;
- # connection will be closed if no data is read for 600s between successive read operations
- proxy_read_timeout 600s;
- }
-
- # redirect server error pages to the static page /50x.html
- #
- error_page 500 502 503 504 /50x.html;
- location = /50x.html {
- root /usr/share/nginx/html;
- }
-}
\ No newline at end of file
diff --git a/examples/tracetest-signoz/signoz/custom-function.xml b/examples/tracetest-signoz/signoz/custom-function.xml
deleted file mode 100644
index b2b3f91a1a..0000000000
--- a/examples/tracetest-signoz/signoz/custom-function.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<functions>
-    <function>
-        <type>executable</type>
-        <name>histogramQuantile</name>
-        <return_type>Float64</return_type>
-        <argument>
-            <type>Array(Float64)</type>
-            <name>buckets</name>
-        </argument>
-        <argument>
-            <type>Array(Float64)</type>
-            <name>counts</name>
-        </argument>
-        <argument>
-            <type>Float64</type>
-            <name>quantile</name>
-        </argument>
-        <format>CSV</format>
-        <command>./histogramQuantile</command>
-    </function>
-</functions>
diff --git a/examples/tracetest-signoz/signoz/otel-collector-config.yaml b/examples/tracetest-signoz/signoz/otel-collector-config.yaml
deleted file mode 100644
index c331f3a032..0000000000
--- a/examples/tracetest-signoz/signoz/otel-collector-config.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-receivers:
- filelog/dockercontainers:
- include: [ "/var/lib/docker/containers/*/*.log" ]
- start_at: end
- include_file_path: true
- include_file_name: false
- operators:
- - type: json_parser
- id: parser-docker
- output: extract_metadata_from_filepath
- timestamp:
- parse_from: attributes.time
- layout: '%Y-%m-%dT%H:%M:%S.%LZ'
- - type: regex_parser
- id: extract_metadata_from_filepath
- regex: '^.*containers/(?P<container_id>[^_]+)/.*log$'
- parse_from: attributes["log.file.path"]
- output: parse_body
- - type: move
- id: parse_body
- from: attributes.log
- to: body
- output: time
- - type: remove
- id: time
- field: attributes.time
- opencensus:
- endpoint: 0.0.0.0:55678
- otlp/spanmetrics:
- protocols:
- grpc:
- endpoint: localhost:12345
- otlp:
- protocols:
- grpc:
- endpoint: 0.0.0.0:4317
- http:
- endpoint: 0.0.0.0:4318
- jaeger:
- protocols:
- grpc:
- endpoint: 0.0.0.0:14250
- thrift_http:
- endpoint: 0.0.0.0:14268
- # thrift_compact:
- # endpoint: 0.0.0.0:6831
- # thrift_binary:
- # endpoint: 0.0.0.0:6832
- hostmetrics:
- collection_interval: 30s
- scrapers:
- cpu: {}
- load: {}
- memory: {}
- disk: {}
- filesystem: {}
- network: {}
- prometheus:
- config:
- global:
- scrape_interval: 60s
- scrape_configs:
- # otel-collector internal metrics
- - job_name: otel-collector
- static_configs:
- - targets:
- - localhost:8888
- labels:
- job_name: otel-collector
-
-
-processors:
- logstransform/internal:
- operators:
- - type: trace_parser
- if: '"trace_id" in attributes or "span_id" in attributes'
- trace_id:
- parse_from: attributes.trace_id
- span_id:
- parse_from: attributes.span_id
- output: remove_trace_id
- - type: trace_parser
- if: '"traceId" in attributes or "spanId" in attributes'
- trace_id:
- parse_from: attributes.traceId
- span_id:
- parse_from: attributes.spanId
- output: remove_traceId
- - id: remove_traceId
- type: remove
- if: '"traceId" in attributes'
- field: attributes.traceId
- output: remove_spanId
- - id: remove_spanId
- type: remove
- if: '"spanId" in attributes'
- field: attributes.spanId
- - id: remove_trace_id
- type: remove
- if: '"trace_id" in attributes'
- field: attributes.trace_id
- output: remove_span_id
- - id: remove_span_id
- type: remove
- if: '"span_id" in attributes'
- field: attributes.span_id
- batch:
- send_batch_size: 10000
- send_batch_max_size: 11000
- timeout: 10s
- signozspanmetrics/prometheus:
- metrics_exporter: prometheus
- latency_histogram_buckets: [100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s ]
- dimensions_cache_size: 100000
- dimensions:
- - name: service.namespace
- default: default
- - name: deployment.environment
- default: default
- # This is added to ensure the uniqueness of the timeseries
- # Otherwise, identical timeseries produced by multiple replicas of
- # collectors result in incorrect APM metrics
- - name: 'signoz.collector.id'
- # memory_limiter:
- # # 80% of maximum memory up to 2G
- # limit_mib: 1500
- # # 25% of limit up to 2G
- # spike_limit_mib: 512
- # check_interval: 5s
- #
- # # 50% of the maximum memory
- # limit_percentage: 50
- # # 20% of max memory usage spike expected
- # spike_limit_percentage: 20
- # queued_retry:
- # num_workers: 4
- # queue_size: 100
- # retry_on_failure: true
- resourcedetection:
- # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
- detectors: [env, system] # include ec2 for AWS, gcp for GCP and azure for Azure.
- timeout: 2s
-
-extensions:
- health_check:
- endpoint: 0.0.0.0:13133
- zpages:
- endpoint: 0.0.0.0:55679
- pprof:
- endpoint: 0.0.0.0:1777
-
-exporters:
- clickhousetraces:
- datasource: tcp://clickhouse:9000/?database=signoz_traces
- docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
- low_cardinal_exception_grouping: ${LOW_CARDINAL_EXCEPTION_GROUPING}
- clickhousemetricswrite:
- endpoint: tcp://clickhouse:9000/?database=signoz_metrics
- resource_to_telemetry_conversion:
- enabled: true
- clickhousemetricswrite/prometheus:
- endpoint: tcp://clickhouse:9000/?database=signoz_metrics
- prometheus:
- endpoint: 0.0.0.0:8889
- # logging: {}
-
- clickhouselogsexporter:
- dsn: tcp://clickhouse:9000/
- docker_multi_node_cluster: ${DOCKER_MULTI_NODE_CLUSTER}
- timeout: 5s
- sending_queue:
- queue_size: 100
- retry_on_failure:
- enabled: true
- initial_interval: 5s
- max_interval: 30s
- max_elapsed_time: 300s
-
-service:
- telemetry:
- metrics:
- address: 0.0.0.0:8888
- extensions:
- - health_check
- - zpages
- - pprof
- pipelines:
- traces:
- receivers: [jaeger, otlp]
- processors: [signozspanmetrics/prometheus, batch]
- exporters: [clickhousetraces]
- metrics:
- receivers: [otlp]
- processors: [batch]
- exporters: [clickhousemetricswrite]
- metrics/generic:
- receivers: [hostmetrics]
- processors: [resourcedetection, batch]
- exporters: [clickhousemetricswrite]
- metrics/prometheus:
- receivers: [prometheus]
- processors: [batch]
- exporters: [clickhousemetricswrite/prometheus]
- metrics/spanmetrics:
- receivers: [otlp/spanmetrics]
- exporters: [prometheus]
- logs:
- receivers: [otlp, filelog/dockercontainers]
- processors: [logstransform/internal, batch]
- exporters: [clickhouselogsexporter]
\ No newline at end of file
diff --git a/examples/tracetest-signoz/signoz/otel-collector-metrics-config.yaml b/examples/tracetest-signoz/signoz/otel-collector-metrics-config.yaml
deleted file mode 100644
index 3af1fae09a..0000000000
--- a/examples/tracetest-signoz/signoz/otel-collector-metrics-config.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-receivers:
- otlp:
- protocols:
- grpc:
- http:
- prometheus:
- config:
- scrape_configs:
- # otel-collector-metrics internal metrics
- - job_name: otel-collector-metrics
- scrape_interval: 60s
- static_configs:
- - targets:
- - localhost:8888
- labels:
- job_name: otel-collector-metrics
- # SigNoz span metrics
- - job_name: signozspanmetrics-collector
- scrape_interval: 60s
- static_configs:
- - targets:
- - signoz-otel-collector:8889
-
-processors:
- batch:
- send_batch_size: 10000
- send_batch_max_size: 11000
- timeout: 10s
- # memory_limiter:
- # # 80% of maximum memory up to 2G
- # limit_mib: 1500
- # # 25% of limit up to 2G
- # spike_limit_mib: 512
- # check_interval: 5s
- #
- # # 50% of the maximum memory
- # limit_percentage: 50
- # # 20% of max memory usage spike expected
- # spike_limit_percentage: 20
- # queued_retry:
- # num_workers: 4
- # queue_size: 100
- # retry_on_failure: true
-
-extensions:
- health_check:
- endpoint: 0.0.0.0:13133
- zpages:
- endpoint: 0.0.0.0:55679
- pprof:
- endpoint: 0.0.0.0:1777
-
-exporters:
- clickhousemetricswrite:
- endpoint: tcp://clickhouse:9000/?database=signoz_metrics
-
-service:
- telemetry:
- metrics:
- address: 0.0.0.0:8888
- extensions:
- - health_check
- - zpages
- - pprof
- pipelines:
- metrics:
- receivers: [prometheus]
- processors: [batch]
- exporters: [clickhousemetricswrite]
diff --git a/examples/tracetest-signoz/signoz/prometheus.yml b/examples/tracetest-signoz/signoz/prometheus.yml
deleted file mode 100644
index 6a796ea1d0..0000000000
--- a/examples/tracetest-signoz/signoz/prometheus.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# my global config
-global:
- scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
- evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
- # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
- # - "first_rules.yml"
- # - "second_rules.yml"
- - 'alerts.yml'
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs: []
-
-remote_read:
- - url: tcp://clickhouse:9000/?database=signoz_metrics
diff --git a/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile b/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile
deleted file mode 100755
index 3b77a7b26d..0000000000
Binary files a/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile and /dev/null differ
diff --git a/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile.go b/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile.go
deleted file mode 100644
index 9540a774e1..0000000000
--- a/examples/tracetest-signoz/signoz/user_scripts/histogramQuantile.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package main
-
-import (
- "bufio"
- "fmt"
- "math"
- "os"
- "sort"
- "strconv"
- "strings"
-)
-
-// NOTE: executable must be built with target OS and architecture set to linux/amd64
-// env GOOS=linux GOARCH=amd64 go build -o histogramQuantile histogramQuantile.go
-
-// The following code is adapted from the following source:
-// https://github.com/prometheus/prometheus/blob/main/promql/quantile.go
-
-type bucket struct {
- upperBound float64
- count float64
-}
-
-// buckets implements sort.Interface.
-type buckets []bucket
-
-func (b buckets) Len() int { return len(b) }
-func (b buckets) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
-
-// bucketQuantile calculates the quantile 'q' based on the given buckets. The
-// buckets will be sorted by upperBound by this function (i.e. no sorting
-// needed before calling this function). The quantile value is interpolated
-// assuming a linear distribution within a bucket. However, if the quantile
-// falls into the highest bucket, the upper bound of the 2nd highest bucket is
-// returned. A natural lower bound of 0 is assumed if the upper bound of the
-// lowest bucket is greater 0. In that case, interpolation in the lowest bucket
-// happens linearly between 0 and the upper bound of the lowest bucket.
-// However, if the lowest bucket has an upper bound less or equal 0, this upper
-// bound is returned if the quantile falls into the lowest bucket.
-//
-// There are a number of special cases (once we have a way to report errors
-// happening during evaluations of AST functions, we should report those
-// explicitly):
-//
-// If 'buckets' has 0 observations, NaN is returned.
-//
-// If 'buckets' has fewer than 2 elements, NaN is returned.
-//
-// If the highest bucket is not +Inf, NaN is returned.
-//
-// If q==NaN, NaN is returned.
-//
-// If q<0, -Inf is returned.
-//
-// If q>1, +Inf is returned.
-func bucketQuantile(q float64, buckets buckets) float64 {
- if math.IsNaN(q) {
- return math.NaN()
- }
- if q < 0 {
- return math.Inf(-1)
- }
- if q > 1 {
- return math.Inf(+1)
- }
- sort.Sort(buckets)
- if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
- return math.NaN()
- }
-
- buckets = coalesceBuckets(buckets)
- ensureMonotonic(buckets)
-
- if len(buckets) < 2 {
- return math.NaN()
- }
- observations := buckets[len(buckets)-1].count
- if observations == 0 {
- return math.NaN()
- }
- rank := q * observations
- b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
-
- if b == len(buckets)-1 {
- return buckets[len(buckets)-2].upperBound
- }
- if b == 0 && buckets[0].upperBound <= 0 {
- return buckets[0].upperBound
- }
- var (
- bucketStart float64
- bucketEnd = buckets[b].upperBound
- count = buckets[b].count
- )
- if b > 0 {
- bucketStart = buckets[b-1].upperBound
- count -= buckets[b-1].count
- rank -= buckets[b-1].count
- }
- return bucketStart + (bucketEnd-bucketStart)*(rank/count)
-}
-
-// coalesceBuckets merges buckets with the same upper bound.
-//
-// The input buckets must be sorted.
-func coalesceBuckets(buckets buckets) buckets {
- last := buckets[0]
- i := 0
- for _, b := range buckets[1:] {
- if b.upperBound == last.upperBound {
- last.count += b.count
- } else {
- buckets[i] = last
- last = b
- i++
- }
- }
- buckets[i] = last
- return buckets[:i+1]
-}
-
-// The assumption that bucket counts increase monotonically with increasing
-// upperBound may be violated during:
-//
-// * Recording rule evaluation of histogram_quantile, especially when rate()
-// has been applied to the underlying bucket timeseries.
-// * Evaluation of histogram_quantile computed over federated bucket
-// timeseries, especially when rate() has been applied.
-//
-// This is because scraped data is not made available to rule evaluation or
-// federation atomically, so some buckets are computed with data from the
-// most recent scrapes, but the other buckets are missing data from the most
-// recent scrape.
-//
-// Monotonicity is usually guaranteed because if a bucket with upper bound
-// u1 has count c1, then any bucket with a higher upper bound u > u1 must
-// have counted all c1 observations and perhaps more, so that c >= c1.
-//
-// Randomly interspersed partial sampling breaks that guarantee, and rate()
-// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
-// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
-// monotonicity is broken. It is exacerbated by rate() because under normal
-// operation, cumulative counting of buckets will cause the bucket counts to
-// diverge such that small differences from missing samples are not a problem.
-// rate() removes this divergence.)
-//
-// bucketQuantile depends on that monotonicity to do a binary search for the
-// bucket with the φ-quantile count, so breaking the monotonicity
-// guarantee causes bucketQuantile() to return undefined (nonsense) results.
-//
-// As a somewhat hacky solution until ingestion is atomic per scrape, we
-// calculate the "envelope" of the histogram buckets, essentially removing
-// any decreases in the count between successive buckets.
-
-func ensureMonotonic(buckets buckets) {
- max := buckets[0].count
- for i := 1; i < len(buckets); i++ {
- switch {
- case buckets[i].count > max:
- max = buckets[i].count
- case buckets[i].count < max:
- buckets[i].count = max
- }
- }
-}
-
-// End of copied code.
-
-func readLines() []string {
- r := bufio.NewReader(os.Stdin)
- bytes := []byte{}
- lines := []string{}
- for {
- line, isPrefix, err := r.ReadLine()
- if err != nil {
- break
- }
- bytes = append(bytes, line...)
- if !isPrefix {
- str := strings.TrimSpace(string(bytes))
- if len(str) > 0 {
- lines = append(lines, str)
- bytes = []byte{}
- }
- }
- }
- if len(bytes) > 0 {
- lines = append(lines, string(bytes))
- }
- return lines
-}
-
-func main() {
- lines := readLines()
- for _, text := range lines {
- // Example input
- // "[1, 2, 4, 8, 16]", "[1, 5, 8, 10, 14]", 0.9"
- // bounds - counts - quantile
- parts := strings.Split(text, "\",")
-
- var bucketNumbers []float64
- // Strip the ends with square brackets
- text = parts[0][2 : len(parts[0])-1]
- // Parse the bucket bounds
- for _, num := range strings.Split(text, ",") {
- num = strings.TrimSpace(num)
- number, err := strconv.ParseFloat(num, 64)
- if err == nil {
- bucketNumbers = append(bucketNumbers, number)
- }
- }
-
- var bucketCounts []float64
- // Strip the ends with square brackets
- text = parts[1][2 : len(parts[1])-1]
- // Parse the bucket counts
- for _, num := range strings.Split(text, ",") {
- num = strings.TrimSpace(num)
- number, err := strconv.ParseFloat(num, 64)
- if err == nil {
- bucketCounts = append(bucketCounts, number)
- }
- }
-
- // Parse the quantile
- q, err := strconv.ParseFloat(parts[2], 64)
- var b buckets
-
- if err == nil {
- for i := 0; i < len(bucketNumbers); i++ {
- b = append(b, bucket{upperBound: bucketNumbers[i], count: bucketCounts[i]})
- }
- }
- fmt.Println(bucketQuantile(q, b))
- }
-}
diff --git a/examples/tracetest-signoz/tests/list-tests.yaml b/examples/tracetest-signoz/tests/list-tests.yaml
deleted file mode 100644
index 294b8af9fe..0000000000
--- a/examples/tracetest-signoz/tests/list-tests.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-type: Test
-spec:
- id: e9c6cff9-974d-4263-8a23-22f1e9f975aa
- name: List all tracetest tests
- description: List all existing tests from tracetest API
- trigger:
- type: http
- httpRequest:
- url: http://localhost:11633/api/tests
- method: GET
- headers:
- - key: Content-Type
- value: application/json
- grpc:
- protobufFile: ""
- address: ""
- method: ""
- specs:
- - selector: span[tracetest.span.type="http" name="GET /api/tests"]
- assertions:
- - attr:tracetest.selected_spans.count = 1
diff --git a/examples/tracetest-signoz/tracetest/collector.config.yaml b/examples/tracetest-signoz/tracetest/collector.config.yaml
deleted file mode 100644
index dd97e97add..0000000000
--- a/examples/tracetest-signoz/tracetest/collector.config.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-receivers:
- otlp:
- protocols:
- grpc:
- http:
-
-processors:
- batch:
- timeout: 100ms
-
- # Data sources: traces
- probabilistic_sampler:
- hash_seed: 22
- sampling_percentage: 100
-
-exporters:
- # OTLP for Tracetest
- otlp/tracetest:
- endpoint: tracetest:4317 # Send traces to Tracetest.
- # Read more in docs here: https://docs.tracetest.io/configuration/connecting-to-data-stores/opentelemetry-collector
- tls:
- insecure: true
- # OTLP for Signoz
- otlp/signoz:
- endpoint: signoz-otel-collector:4317
- tls:
- insecure: true
-
-service:
- pipelines:
- traces:
- receivers: [otlp]
- processors: [probabilistic_sampler, batch]
- exporters: [otlp/signoz,otlp/tracetest]
diff --git a/examples/tracetest-signoz/tracetest/tracetest-config.yaml b/examples/tracetest-signoz/tracetest/tracetest-config.yaml
deleted file mode 100644
index cbe3226feb..0000000000
--- a/examples/tracetest-signoz/tracetest/tracetest-config.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-postgres:
- host: postgres
- user: postgres
- password: postgres
- port: 5432
- dbname: postgres
- params: sslmode=disable
-
-telemetry:
- exporters:
- collector:
- serviceName: tracetest
- sampling: 100 # 100%
- exporter:
- type: collector
- collector:
- endpoint: otel-collector:4317
-
-server:
- telemetry:
- exporter: collector
diff --git a/examples/tracetest-signoz/tracetest/tracetest-provision.yaml b/examples/tracetest-signoz/tracetest/tracetest-provision.yaml
deleted file mode 100644
index 404333ddc4..0000000000
--- a/examples/tracetest-signoz/tracetest/tracetest-provision.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-type: PollingProfile
-spec:
- name: Default
- strategy: periodic
- default: true
- periodic:
- retryDelay: 5s
- timeout: 10m
-
----
-type: DataStore
-spec:
- name: Signoz
- type: signoz
----
-type: TestRunner
-spec:
- id: current
- name: default
- requiredGates:
- - analyzer-score
- - test-specs