Commit

docs: Add example default metrics with custom labels

trallnag committed Jul 17, 2023
1 parent 8346496 commit 3bcb0ce
Showing 3 changed files with 184 additions and 0 deletions.
4 changes: 4 additions & 0 deletions devel/examples/README.md
@@ -4,6 +4,10 @@
FastAPI app run with Uvicorn, but the `/metrics` endpoint is exposed separately
using Prometheus' `start_http_server` function (see the sketch after this list).

- **[default-metrics-diff-labels](./default-metrics-diff-labels/):** Usage of
PFI with a custom instrumentation function that mimics the default metrics but
with custom label names.

- **[prom-multi-proc-gunicorn](./prom-multi-proc-gunicorn/):** How to use
FastAPI app run with Gunicorn in combination with the Prometheus client library.
Focus on multiprocessing mode.
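
For orientation, the separate-server pattern from the first bullet looks roughly
like the sketch below. This is not the code of that example; the port and setup
are assumptions.

```python
from fastapi import FastAPI
from prometheus_client import start_http_server

from prometheus_fastapi_instrumentator import Instrumentator

app = FastAPI()

# Register the instrumentation middleware; metrics land in the default
# prometheus_client registry, but no /metrics route is added to the app.
Instrumentator().instrument(app)

# Serve the default registry on its own HTTP server (port is an assumption).
start_http_server(9090)
```
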
21 changes: 21 additions & 0 deletions devel/examples/default-metrics-diff-labels/README.md
@@ -0,0 +1,21 @@
# Example `default-metrics-diff-labels`

Example that shows usage of PFI with a custom instrumentation function that
mimics the default metrics but with custom label names.

To run the example, you must have run `poetry install` and `poetry shell` in the
root of this repository. The following commands are executed relative to this
directory.
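
If that setup has not been done yet, it amounts to roughly the following, run
from the repository root:

```shell
poetry install
poetry shell
```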

Start app with Uvicorn:

```shell
uvicorn main:app
```

Interact with the app:

```shell
curl localhost:8000/ping
curl localhost:8000/metrics
```
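
The metrics output then carries the custom label names. An abbreviated, purely
illustrative excerpt follows; metric and label names come from `main.py` below,
while values and the exact set of series will differ:

```
# HELP http_requests_total Total number of requests by method, status and handler.
# TYPE http_requests_total counter
http_requests_total{my_method="GET",my_status="2xx",my_handler="/ping"} 1.0
# TYPE http_request_duration_seconds histogram
http_request_duration_seconds_count{my_method="GET",my_handler="/ping"} 1.0
```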
159 changes: 159 additions & 0 deletions devel/examples/default-metrics-diff-labels/main.py
@@ -0,0 +1,159 @@
from typing import Callable, Optional, Sequence, Union

from fastapi import FastAPI
from prometheus_client import REGISTRY, CollectorRegistry, Counter, Histogram, Summary

from prometheus_fastapi_instrumentator import Instrumentator
from prometheus_fastapi_instrumentator.metrics import Info

PING_TOTAL = Counter("ping", "Number of ping calls.")


def my_metrics(
latency_highr_buckets: Sequence[Union[float, str]] = (
0.01,
0.025,
0.05,
0.075,
0.1,
0.25,
0.5,
0.75,
1,
1.5,
2,
2.5,
3,
3.5,
4,
4.5,
5,
7.5,
10,
30,
60,
),
latency_lowr_buckets: Sequence[Union[float, str]] = (0.1, 0.5, 1),
registry: CollectorRegistry = REGISTRY,
) -> Optional[Callable[[Info], None]]:
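# The duplicate-registration error message differs slightly between
# prometheus_client versions, so both known wordings are matched.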
def is_duplicated_time_series(error: ValueError) -> bool:
return any(
map(
error.args[0].__contains__,
[
"Duplicated timeseries in CollectorRegistry:",
"Duplicated time series in CollectorRegistry:",
],
)
)

if latency_highr_buckets[-1] != float("inf"):
latency_highr_buckets = [*latency_highr_buckets, float("inf")]

if latency_lowr_buckets[-1] != float("inf"):
latency_lowr_buckets = [*latency_lowr_buckets, float("inf")]

# Starlette will call app.build_middleware_stack() with every new middleware
# added, which will call all this again, which will make the registry
# complain about duplicated metrics.
#
# The Python Prometheus client currently doesn't seem to have a way to
# verify if adding a metric will cause errors or not, so the only way to
# handle it seems to be with this try block.
try:
TOTAL = Counter(
name="http_requests_total",
documentation="Total number of requests by method, status and handler.",
labelnames=(
"my_method",
"my_status",
"my_handler",
),
registry=registry,
)

IN_SIZE = Summary(
name="http_request_size_bytes",
documentation=(
"Content length of incoming requests by handler. "
"Only value of header is respected. Otherwise ignored. "
"No percentile calculated. "
),
labelnames=("my_handler",),
registry=registry,
)

OUT_SIZE = Summary(
name="http_response_size_bytes",
documentation=(
"Content length of outgoing responses by handler. "
"Only value of header is respected. Otherwise ignored. "
"No percentile calculated. "
),
labelnames=("my_handler",),
registry=registry,
)

LATENCY_HIGHR = Histogram(
name="http_request_duration_highr_seconds",
documentation=(
"Latency with many buckets but no API specific labels. "
"Made for more accurate percentile calculations. "
),
buckets=latency_highr_buckets,
registry=registry,
)

LATENCY_LOWR = Histogram(
name="http_request_duration_seconds",
documentation=(
"Latency with only few buckets by handler. "
"Made to be only used if aggregation by handler is important. "
),
buckets=latency_lowr_buckets,
labelnames=(
"my_method",
"my_handler",
),
registry=registry,
)

def instrumentation(info: Info) -> None:
TOTAL.labels(info.method, info.modified_status, info.modified_handler).inc()

IN_SIZE.labels(info.modified_handler).observe(
int(info.request.headers.get("Content-Length", 0))
)

if info.response and hasattr(info.response, "headers"):
OUT_SIZE.labels(info.modified_handler).observe(
int(info.response.headers.get("Content-Length", 0))
)
else:
OUT_SIZE.labels(info.modified_handler).observe(0)

if info.modified_status.startswith("2"):
LATENCY_HIGHR.observe(info.modified_duration)

LATENCY_LOWR.labels(info.method, info.modified_handler).observe(
info.modified_duration
)

return instrumentation

except ValueError as e:
if not is_duplicated_time_series(e):
raise e

return None


app = FastAPI()

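# Attach the instrumentation middleware, register the custom metrics function
# defined above, and expose the /metrics endpoint on the app.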
Instrumentator().instrument(app).add(my_metrics()).expose(app)


@app.get("/ping")
def get_ping():
PING_TOTAL.inc()
return "pong"
