Add more panels
Signed-off-by: Dimitar Dimitrov <[email protected]>
dimitarvdimitrov committed Nov 25, 2024
1 parent 28c8594 commit 3fc04c5
Showing 1 changed file with 64 additions and 28 deletions.
92 changes: 64 additions & 28 deletions operations/mimir-mixin/dashboards/writes.libsonnet
@@ -239,30 +239,6 @@ local filename = 'mimir-writes.json';
],
) + $.aliasColors({ successful: $._colors.success, failed: $._colors.failed, 'read errors': $._colors.failed }) + $.stack,
)
.addPanel(
$.timeseriesPanel('Kafka fetch throughput') +
$.panelDescription(
'Kafka fetch throughput',
|||
Throughput of fetches received from Kafka brokers.
This panel shows the rate of bytes fetched from Kafka brokers, and the rate of bytes discarded.
The discarded bytes are due to concurrent fetching.
Discarded bytes amounting to up to 10% of the total fetched bytes are expected during startup, when fetch concurrency is higher.
Discarded bytes amounting to around 1% of the total fetched bytes are expected during normal operation.
High values of discarded bytes might indicate inefficient estimation of record size. This can be verified via the cortex_ingest_storage_reader_bytes_per_record metric.
|||
) +
$.queryPanel([
'sum(rate(cortex_ingest_storage_reader_fetch_bytes_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
'sum(rate(cortex_ingest_storage_reader_fetched_discarded_bytes_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
], [
'Fetched bytes (decompressed)',
'Discarded bytes (decompressed)',
]) +
{ fieldConfig+: { defaults+: { unit: 'Bps' } } },
)
.addPanel(
$.timeseriesPanel('Kafka records batch latency') +
$.panelDescription(
@@ -292,10 +268,59 @@ local filename = 'mimir-writes.json';
} +
$.stack,
)
.addPanel(
$.timeseriesPanel('Record size') +
$.panelDescription(
'Record size',
|||
Concurrent fetching estimates the size of records.
The estimate is used to enforce the max-buffered-bytes limit and to reduce discarded bytes.
|||
) +
$.queryPanel([
|||
sum(rate(cortex_ingest_storage_reader_fetch_bytes_total{%s}[$__rate_interval]))
/
sum(rate(cortex_ingest_storage_reader_records_per_fetch_sum{%s}[$__rate_interval]))
||| % [$.jobMatcher($._config.job_names.ingester), $.jobMatcher($._config.job_names.ingester)],
|||
histogram_avg(sum(rate(cortex_ingest_storage_reader_bytes_per_record{%s}[$__rate_interval])))
|||
% [$.jobMatcher($._config.job_names.ingester)],
], [
'Actual bytes per record (avg)',
'Estimated bytes per record (avg)',
]) +
{ fieldConfig+: { defaults+: { unit: 'bytes' } } },
)
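To judge how well the estimate tracks reality, the two series above can also be combined into a single accuracy ratio. A minimal sketch of such a companion panel in the same mixin style follows; the panel title, legend, and the ratio idea are hypothetical additions (not part of this commit), while the metrics and helpers ($.timeseriesPanel, $.queryPanel, $.jobMatcher) come from the diff itself:

.addPanel(
  $.timeseriesPanel('Record size estimation accuracy') +
  $.queryPanel([
    |||
      histogram_avg(sum(rate(cortex_ingest_storage_reader_bytes_per_record{%s}[$__rate_interval])))
      /
      (
        sum(rate(cortex_ingest_storage_reader_fetch_bytes_total{%s}[$__rate_interval]))
        /
        sum(rate(cortex_ingest_storage_reader_records_per_fetch_sum{%s}[$__rate_interval]))
      )
    ||| % [$.jobMatcher($._config.job_names.ingester), $.jobMatcher($._config.job_names.ingester), $.jobMatcher($._config.job_names.ingester)],
  ], [
    'Estimated / actual bytes per record',
  ]) +
  { fieldConfig+: { defaults+: { unit: 'percentunit' } } },
)

A value close to 1 would mean the estimator is well calibrated; sustained deviation in either direction suggests the record size estimate is off.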
)
.addRowIf(
$._config.show_ingest_storage_panels,
$.row('')
.addPanel(
$.timeseriesPanel('Kafka fetch throughput') +
$.panelDescription(
'Kafka fetch throughput',
|||
Throughput of fetches received from Kafka brokers.
This panel shows the rate of bytes fetched from Kafka brokers, and the rate of bytes discarded.
The discarded bytes are due to concurrently fetching overlapping offsets.
Discarded bytes amounting to up to 10% of the total fetched bytes are expected during startup, when fetch concurrency is higher.
Discarded bytes amounting to around 1% of the total fetched bytes are expected during normal operation.
High values of discarded bytes might indicate inaccurate estimation of record size.
|||
) +
$.queryPanel([
'sum(rate(cortex_ingest_storage_reader_fetch_bytes_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
'sum(rate(cortex_ingest_storage_reader_fetched_discarded_bytes_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
], [
'Fetched bytes (decompressed)',
'Discarded bytes (decompressed)',
]) +
{ fieldConfig+: { defaults+: { unit: 'Bps' } } },
)
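The 10% and 1% guidelines above are easier to eyeball as a single ratio. A hypothetical companion panel in the same mixin style (a sketch, not part of this commit; only the title, legend, and ratio are new, the metrics and helpers are those used in the diff) could chart it directly:

.addPanel(
  $.timeseriesPanel('Kafka fetch bytes discarded (%)') +
  $.panelDescription(
    'Kafka fetch bytes discarded (%)',
    |||
      Share of fetched bytes that were discarded due to overlapping concurrent fetches.
    |||
  ) +
  $.queryPanel([
    |||
      sum(rate(cortex_ingest_storage_reader_fetched_discarded_bytes_total{%s}[$__rate_interval]))
      /
      sum(rate(cortex_ingest_storage_reader_fetch_bytes_total{%s}[$__rate_interval]))
    ||| % [$.jobMatcher($._config.job_names.ingester), $.jobMatcher($._config.job_names.ingester)],
  ], [
    'Discarded / fetched',
  ]) +
  { fieldConfig+: { defaults+: { unit: 'percentunit' } } },
)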
.addPanel(
$.timeseriesPanel('Write request batches processed / sec') +
$.panelDescription(
@@ -346,10 +371,21 @@ local filename = 'mimir-writes.json';
) + $.aliasColors({ successful: $._colors.success, 'failed (client)': $._colors.clientError, 'failed (server)': $._colors.failed }) + $.stack,
)
.addPanel(
$.ingestStorageIngesterEndToEndLatencyOutliersWhenRunningPanel(),
)
.addPanel(
$.ingestStorageIngesterEndToEndLatencyWhenStartingPanel(),
$.timeseriesPanel('Ingested samples / sec') +
$.panelDescription(
'Ingested samples',
|||
Concurrent ingestion estimates the number of timeseries per batch to choose the optimal concurrency settings.
|||
) +
$.queryPanel([
'histogram_sum(sum(rate(cortex_ingest_storage_reader_pusher_timeseries_per_flush{%s}[$__rate_interval])))' % [$.jobMatcher($._config.job_names.ingester)],
'sum(rate(cortex_ingest_storage_reader_pusher_estimated_timeseries_total{%s}[$__rate_interval]))' % [$.jobMatcher($._config.job_names.ingester)],
], [
'Actual samples',
'Estimated samples',
]) +
{ fieldConfig+: { defaults+: { unit: 'short' } } },
)
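Since the concurrency estimate is made per batch, the average batch size itself is also worth watching. A hypothetical companion panel (not part of this commit) could reuse the same native-histogram metric from the queries above; the panel title and legend are assumptions:

.addPanel(
  $.timeseriesPanel('Timeseries per flush (avg)') +
  $.queryPanel([
    |||
      histogram_avg(sum(rate(cortex_ingest_storage_reader_pusher_timeseries_per_flush{%s}[$__rate_interval])))
    ||| % [$.jobMatcher($._config.job_names.ingester)],
  ], [
    'Avg timeseries per flush',
  ]) +
  { fieldConfig+: { defaults+: { unit: 'short' } } },
)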
)
.addRowIf(