diff --git a/.dockerignore b/.dockerignore index 78fffeb3f9..3f7168163e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,3 +6,4 @@ web/ui/node_modules/ web/ui/build/ packaging/windows/LICENSE packaging/windows/agent-windows-amd64.exe +cmd/grafana-agent/Dockerfile \ No newline at end of file diff --git a/.drone/drone.yml b/.drone/drone.yml index 1e1fb6d2da..8f43369108 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -8,7 +8,7 @@ steps: - commands: - apt-get update -y && apt-get install -y libsystemd-dev - make lint - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Lint trigger: event: @@ -23,7 +23,7 @@ platform: steps: - commands: - make GO_TAGS="nodocker" test - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Run Go tests trigger: event: @@ -38,7 +38,7 @@ platform: steps: - commands: - K8S_USE_DOCKER_NETWORK=1 make test - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Run Go tests volumes: - name: docker @@ -61,7 +61,7 @@ platform: steps: - commands: - go test -tags="nodocker,nonetwork" ./... - image: grafana/agent-build-image:0.32.0-windows + image: grafana/agent-build-image:0.33.0-windows name: Run Go tests trigger: ref: @@ -76,7 +76,7 @@ platform: steps: - commands: - make agent-image - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build container volumes: - name: docker @@ -102,7 +102,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' - image: grafana/agent-build-image:0.32.0-windows + image: grafana/agent-build-image:0.33.0-windows name: Build container volumes: - name: docker @@ -129,7 +129,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -146,7 +146,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -163,7 +163,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -180,7 +180,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -196,7 +196,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -212,7 +212,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -228,7 +228,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -244,7 +244,7 @@ steps: - commands: - make generate-ui - 
GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -261,7 +261,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -278,7 +278,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Build trigger: event: @@ -295,7 +295,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= GOEXPERIMENT=cngcrypto make agent-windows-boringcrypto - image: grafana/agent-build-image:0.32.0-boringcrypto + image: grafana/agent-build-image:0.33.0-boringcrypto name: Build trigger: event: @@ -311,7 +311,7 @@ steps: - commands: - DOCKER_OPTS="" make dist/grafana-agent-linux-amd64 - DOCKER_OPTS="" make test-packages - image: grafana/agent-build-image:0.32.0 + image: grafana/agent-build-image:0.33.0 name: Test Linux system packages volumes: - name: docker @@ -407,6 +407,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: 118cf0f805912a99d8c268f18a179dbe69fa701fbf11f9a9effac933b63091e0 +hmac: 509aa746729e5eaf86e4cbb02b07f125399aabefcc3bf5b1693ea2cca2eaa4e1 ... diff --git a/.drone/pipelines/build_images.jsonnet b/.drone/pipelines/build_images.jsonnet index 9378bb703e..b36f5be96b 100644 --- a/.drone/pipelines/build_images.jsonnet +++ b/.drone/pipelines/build_images.jsonnet @@ -36,7 +36,7 @@ local locals = { 'docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD', 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes', 'docker buildx create --name multiarch --driver docker-container --use', - 'docker buildx build --build-arg="GO_RUNTIME=golang:1.22.0-bullseye" --push --platform linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG ./build-image', + 'docker buildx build --build-arg="GO_RUNTIME=golang:1.22.1-bullseye" --push --platform linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG ./build-image', ], }], volumes: [{ @@ -59,7 +59,7 @@ local locals = { 'docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD', 'docker run --rm --privileged multiarch/qemu-user-static --reset -p yes', 'docker buildx create --name multiarch --driver docker-container --use', - 'docker buildx build --build-arg="GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22-bullseye" --push --platform linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG ./build-image', + 'docker buildx build --build-arg="GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye" --push --platform linux/amd64,linux/arm64 -t grafana/agent-build-image:$IMAGE_TAG ./build-image', ], }], volumes: [{ diff --git a/.github/workflows/check-linux-build-image.yml.disabled b/.github/workflows/check-linux-build-image.yml.disabled index 32737b0b6a..300d203d36 100644 --- a/.github/workflows/check-linux-build-image.yml.disabled +++ b/.github/workflows/check-linux-build-image.yml.disabled @@ -25,7 +25,7 @@ jobs: push: false tags: grafana/agent-build-image:latest build-args: | - GO_RUNTIME=golang:1.22.0-bullseye + GO_RUNTIME=golang:1.22.1-bullseye - name: Create test Linux build image for boring crypto uses: docker/build-push-action@v5 
@@ -34,4 +34,4 @@ jobs: push: false tags: grafana/agent-build-image:latest build-args: | - GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22-bullseye \ No newline at end of file + GO_RUNTIME=mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye \ No newline at end of file diff --git a/.github/workflows/helm-release.yml.disabled b/.github/workflows/helm-release.yml.disabled index b5a310bd41..94caa2fd3f 100644 --- a/.github/workflows/helm-release.yml.disabled +++ b/.github/workflows/helm-release.yml.disabled @@ -89,7 +89,7 @@ jobs: git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 with: version: v3.10.3 diff --git a/.github/workflows/helm-test.yml b/.github/workflows/helm-test.yml index 9d4738bcd6..378623eebb 100644 --- a/.github/workflows/helm-test.yml +++ b/.github/workflows/helm-test.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Install Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 with: version: v3.10.3 @@ -46,7 +46,7 @@ jobs: fetch-depth: 0 - name: Install Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 with: version: v3.10.3 @@ -71,7 +71,7 @@ jobs: run: ct lint --config ./operations/helm/ct.yaml - name: Create kind cluster - uses: helm/kind-action@v1.8.0 + uses: helm/kind-action@v1.9.0 if: steps.list-changed.outputs.changed == 'true' - name: Add dependency chart repos diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 12914652e1..ceea4dbcbe 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -26,7 +26,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@84384bd6e777ef152729993b8145ea352e9dd3ef + uses: aquasecurity/trivy-action@062f2592684a31eb3aa050cc61e7ca1451cecd3d with: image-ref: 'grafana/agent:main' format: 'template' diff --git a/.gitignore b/.gitignore index 4f5cb31e3c..26615e37e5 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ data-agent /dist.temp/ /packaging/windows/LICENSE /packaging/windows/agent-windows-amd64.exe +web/ui/build .DS_Store buildx-v* @@ -24,4 +25,4 @@ cover*.out .uptodate node_modules -/docs/variables.mk.local \ No newline at end of file +/docs/variables.mk.local diff --git a/ADOPTERS.md b/ADOPTERS.md index c3cf21f58b..97776cbea7 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -4,5 +4,6 @@ production environments. Please send PRs to add or remove organizations. * [AB Tasty](https://www.abtasty.com/) * [Canonical](https://www.ubuntu.com/) * Cerner Enterprise Hosting +* [CLOUDETEER GmbH](https://www.cloudeteer.de/) * [Embark Studios](https://www.embark.dev/) * [Grafana Labs](https://grafana.com) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c268fc70b..223d3eae7b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,12 @@ internal API changes are not present. Main (unreleased) ----------------- +### Enhancements + +- Add support for importing folders as a single module to `import.file`. (@wildum) + +- Add support for importing directories as a single module to `import.git`. (@wildum) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability @@ -19,6 +25,43 @@ Main (unreleased) ### Bugfixes +- Fix an issue where JSON string array elements were not parsed correctly in `loki.source.cloudflare`.
(@thampiotr) + +- Fix a bug where structured metadata and parsed fields are not passed further in `loki.source.api`. (@marchellodev) + +### Other changes + +- Clustering for Grafana Agent in Flow mode has graduated from beta to stable. + +v0.40.2 (2024-03-05) +-------------------- + +### Bugfixes + +- Set permissions on the `Grafana Agent [Flow]` folder when installing via the + Windows installer rather than relying on the parent folder permissions. (@erikbaranowski) + +- Set restricted viewing permissions on the `agent-config.yaml` (static mode) or + `config.river` (flow mode) when installing via the Windows installer if the + configuration file does not already exist. (@erikbaranowski) + +- Fix an issue where the import config node would not run after a config reload. (@wildum) + +- Fix an issue where Loki could reject a batch of logs when the structured metadata feature is used. (@thampiotr) + +- Fix a duplicate metrics registration panic when recreating static + mode metric instance's write handler. (@rfratto, @hainenber) + +### Other changes + +- Change the Docker base image for Linux containers to `public.ecr.aws/ubuntu/ubuntu:mantic`. (@hainenber) + +v0.40.1 (2024-02-27) +-------------------- + +### Bugfixes + - Fix an issue where the logging config block would trigger an error when trying to send logs to components that were not running. (@wildum) - Fix an issue where a custom component might be wired to a local declare instead of an import declare when they have the same label. (@wildum) @@ -154,6 +197,8 @@ v0.40.0 (2024-02-27) - `grafana-agent` and `grafana-agent-flow` fall back to default X.509 trusted root certificates when the `GODEBUG=x509usefallbackroots=1` environment variable is set. (@hainenber) +- Migrate away from EoL'ed `github.com/aws-sdk-go` v1. (@hainenber) + v0.39.2 (2024-01-31) -------------------- @@ -164,7 +209,6 @@ v0.39.2 (2024-01-31) - An error will be returned in the converter from Static to Flow when `scrape_integration` is set to `true` but no `remote_write` is defined. (@erikbaranowski) - v0.39.1 (2024-01-19) -------------------- @@ -179,7 +223,6 @@ v0.39.1 (2024-01-19) - Fix issue where installing the Windows Agent Flow installer would hang then crash. (@mattdurham) - v0.39.0 (2024-01-09) -------------------- diff --git a/Makefile b/Makefile index c01014d475..e6d823b3eb 100644 --- a/Makefile +++ b/Makefile @@ -207,8 +207,8 @@ ifneq ($(DOCKER_PLATFORM),) DOCKER_FLAGS += --platform=$(DOCKER_PLATFORM) endif -.PHONY: images agent-image -images: agent-image +.PHONY: images agent-image agent-boringcrypto-image +images: agent-image agent-boringcrypto-image agent-image: DOCKER_BUILDKIT=1 docker build $(DOCKER_FLAGS) -t $(AGENT_IMAGE) -f cmd/grafana-agent/Dockerfile . diff --git a/README.md b/README.md index 7ad9d74de7..5993571eef 100644 --- a/README.md +++ b/README.md @@ -98,9 +98,14 @@ launch dependencies to play with Grafana Alloy locally. A new minor release is planned every six weeks. -The release cadence is best-effort: releases may be moved forwards or backwards -if needed. The planned release dates for future minor releases do not change if -one minor release is moved. +The release cadence is best-effort: if necessary, releases may be performed +outside of this cadence, or a scheduled release date can be moved forwards or +backwards. + +Minor releases published on cadence include updating dependencies for upstream +OpenTelemetry Collector code if new versions are available.
Minor releases +published outside of the release cadence may not include these dependency +updates. Patch and security releases may be created at any time. diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 602e7cbd1a..05125a2632 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. -# NOTE: The GO_RUNTIME is used to switch between the default google go runtime and mcr.microsoft.com/oss/go/microsoft/golang:1.22-bullseye which is a microsoft +# NOTE: The GO_RUNTIME is used to switch between the default google go runtime and mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye which is a microsoft # fork of go that allows using windows crypto instead of boring crypto. Details at https://github.com/microsoft/go/tree/microsoft/main/eng/doc/fips ARG GO_RUNTIME=mustoverride @@ -26,6 +26,13 @@ RUN apk add --no-cache docker-cli docker-cli-buildx FROM alpine:3.17 as helm RUN apk add --no-cache helm +# Dependency: nsis (for building Windows installers) +FROM alpine:3.17 as nsis +RUN wget -nv https://nsis.sourceforge.io/mediawiki/images/4/4a/AccessControl.zip \ + && unzip AccessControl.zip -d /usr/share/nsis/ \ + && mkdir -p /usr/share/nsis/Plugins/x86-unicode \ + && cp /usr/share/nsis/Plugins/i386-unicode/AccessControl.dll /usr/share/nsis/Plugins/x86-unicode/ + # Dependency: Go and Go dependencies FROM ${GO_RUNTIME} as golang @@ -86,12 +93,13 @@ RUN apt-get update \ && gem install --no-document fpm \ && rm -rf /var/lib/apt/lists/* -COPY --from=golangci /bin/golangci-lint /usr/local/bin -COPY --from=docker /usr/bin/docker /usr/bin/docker -COPY --from=docker /usr/libexec/docker/cli-plugins /usr/libexec/docker/cli-plugins -COPY --from=helm /usr/bin/helm /usr/bin/helm -COPY --from=golang /usr/local/go /usr/local/go -COPY --from=golang /go/bin /go/bin +COPY --from=golangci /bin/golangci-lint /usr/local/bin +COPY --from=docker /usr/bin/docker /usr/bin/docker +COPY --from=docker /usr/libexec/docker/cli-plugins /usr/libexec/docker/cli-plugins +COPY --from=helm /usr/bin/helm /usr/bin/helm +COPY --from=nsis /usr/share/nsis/Plugins/x86-unicode /usr/share/nsis/Plugins/x86-unicode +COPY --from=golang /usr/local/go /usr/local/go +COPY --from=golang /go/bin /go/bin # Git tries to prevent misuse of repositories (CVE-2022-24765), but we don't # care about this for build containers, where it's expected that the repository diff --git a/cmd/grafana-agent/Dockerfile b/cmd/grafana-agent/Dockerfile index faa8d2850a..633c3ebed0 100644 --- a/cmd/grafana-agent/Dockerfile +++ b/cmd/grafana-agent/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. 
-FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.32.0 as build +FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.33.0 as build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS ARG TARGETARCH @@ -30,7 +30,11 @@ RUN --mount=type=cache,target=/root/.cache/go-build \ GOEXPERIMENT=${GOEXPERIMENT} \ make agent -FROM ubuntu:mantic +FROM public.ecr.aws/ubuntu/ubuntu:mantic + +# Username and UID for the grafana-agent user +ARG UID=473 +ARG USERNAME="grafana-agent" LABEL org.opencontainers.image.source="https://github.com/grafana/agent" @@ -44,6 +48,11 @@ EOF COPY --from=build /src/agent/build/grafana-agent /bin/grafana-agent COPY cmd/grafana-agent/example-config.river /etc/agent/agent.river +# Create the grafana-agent user in the container, but do not set it as the default user +RUN groupadd --gid $UID $USERNAME +RUN useradd -m -u $UID -g $UID $USERNAME +RUN chown -R $USERNAME:$USERNAME /etc/agent +RUN chown -R $USERNAME:$USERNAME /bin/grafana-agent ENTRYPOINT ["/bin/grafana-agent"] ENV AGENT_DEPLOY_MODE=docker diff --git a/cmd/grafana-agent/Dockerfile.windows b/cmd/grafana-agent/Dockerfile.windows index 24c65bf98f..23a0423db8 100644 --- a/cmd/grafana-agent/Dockerfile.windows +++ b/cmd/grafana-agent/Dockerfile.windows @@ -1,4 +1,4 @@ -FROM grafana/agent-build-image:0.32.0-windows as builder +FROM grafana/agent-build-image:0.33.0-windows as builder ARG VERSION ARG RELEASE_BUILD=1 diff --git a/docs/developer/contributing.md b/docs/developer/contributing.md index 1abac93a26..3c5ffe9765 100644 --- a/docs/developer/contributing.md +++ b/docs/developer/contributing.md @@ -88,6 +88,11 @@ Compiling Grafana Agent on Linux requires extra dependencies: * [systemd headers](https://packages.debian.org/sid/libsystemd-dev) for Promtail * Can be installed on Debian-based distributions with: ```sudo apt-get install libsystemd-dev``` +### Compile on Windows +Compiling Grafana Agent on Windows requires extra dependencies: + +* [tdm-gcc](https://jmeubank.github.io/tdm-gcc/download/) full 64-bit install for cgo compilation. + ## Pull Request Checklist Changes should be branched off of the `main` branch. It's recommended to rebase diff --git a/docs/developer/release/0-ensure-otel-dep-updated.md b/docs/developer/release/0-ensure-otel-dep-updated.md new file mode 100644 index 0000000000..ea87009aba --- /dev/null +++ b/docs/developer/release/0-ensure-otel-dep-updated.md @@ -0,0 +1,19 @@ +# Ensure OpenTelemetry Collector dependency has been updated + +Every minor release **must** include an update to a newer version of OpenTelemetry +Collector (when available). Because OpenTelemetry Collector releases roughly three +times as often, this update should happen near the end of the six-week +release cycle, about 1-2 weeks before the release. + +If the OpenTelemetry Collector dependency has not been updated within a release +cycle, **the release should be blocked.** + +## Steps + +1. Examine the CHANGELOG to ensure that the OpenTelemetry Collector dependency + has been updated within the release cycle. + +2. If the dependency has been updated: continue the release process as normal. + +3. If the dependency has not been updated: pause the release process and + orchestrate updating the dependency. diff --git a/docs/developer/release/README.md b/docs/developer/release/README.md index 27bba29630..116cf69665 100644 --- a/docs/developer/release/README.md +++ b/docs/developer/release/README.md @@ -18,6 +18,7 @@ Once a release is scheduled, a release shepherd is determined.
This person will be responsible for ownership of the following workflows: ## Release Candidate Publish +0. [Ensure our OpenTelemetry Collector dependency has been updated](./0-ensure-otel-dep-updated.md) 1. [Create Release Branch](./1-create-release-branch.md) 2. [Cherry Pick Commits](./2-cherry-pick-commits.md) 3. [Update Version in Code](./3-update-version-in-code.md) diff --git a/docs/sources/_index.md b/docs/sources/_index.md index 6cf14905df..55e8817108 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -74,8 +74,14 @@ It is designed to be flexible, performant, and compatible with multiple ecosyste A new minor release is planned every six weeks for the entire {{< param "PRODUCT_NAME" >}}. -The release cadence is best-effort: releases may be moved forwards or backwards if needed. -The planned release dates for future minor releases do not change if one minor release is moved. +The release cadence is best-effort: if necessary, releases may be performed +outside of this cadence, or a scheduled release date can be moved forwards or +backwards. + +Minor releases published on cadence include updating dependencies for upstream +OpenTelemetry Collector code if new versions are available. Minor releases +published outside of the release cadence may not include these dependency +updates. + Patch and security releases may be created at any time. diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index 6cf14905df..55e8817108 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -74,8 +74,14 @@ It is designed to be flexible, performant, and compatible with multiple ecosyste A new minor release is planned every six weeks for the entire {{< param "PRODUCT_NAME" >}}. -The release cadence is best-effort: releases may be moved forwards or backwards if needed. -The planned release dates for future minor releases do not change if one minor release is moved. +The release cadence is best-effort: if necessary, releases may be performed +outside of this cadence, or a scheduled release date can be moved forwards or +backwards. + +Minor releases published on cadence include updating dependencies for upstream +OpenTelemetry Collector code if new versions are available. Minor releases +published outside of the release cadence may not include these dependency +updates. + Patch and security releases may be created at any time. diff --git a/docs/sources/concepts/clustering.md b/docs/sources/concepts/clustering.md index 1d930287f9..0a6c0e6ac3 100644 --- a/docs/sources/concepts/clustering.md +++ b/docs/sources/concepts/clustering.md @@ -1,19 +1,18 @@ --- canonical: https://grafana.com/docs/alloy/latest/concepts/clustering/ description: Learn about Grafana Alloy clustering concepts -labels: - stage: beta menuTitle: Clustering -title: Clustering (beta) +title: Clustering weight: 500 --- -# Clustering (beta) +# Clustering Clustering enables a fleet of {{< param "PRODUCT_ROOT_NAME" >}}s to work together for workload distribution and high availability. It helps create horizontally scalable deployments with minimal resource and operational overhead. -To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating {{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file.
+To achieve this, {{< param "PRODUCT_NAME" >}} makes use of an eventually consistent model that assumes all participating +{{< param "PRODUCT_ROOT_NAME" >}}s are interchangeable and converge on using the same configuration file. The behavior of a standalone, non-clustered {{< param "PRODUCT_ROOT_NAME" >}} is the same as if it were a single-node cluster. diff --git a/docs/sources/concepts/modules.md b/docs/sources/concepts/modules.md index fb0bf2abe6..06de0a8a42 100644 --- a/docs/sources/concepts/modules.md +++ b/docs/sources/concepts/modules.md @@ -28,8 +28,7 @@ You can't import a module that contains top-level blocks other than `declare` or Modules are imported into a _namespace_ where the top-level custom components of the imported module are exposed to the importing module. The label of the import block specifies the namespace of an import. -For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. -Imported namespaces must be unique across a given importing module. +For example, if a configuration contains a block called `import.file "my_module"`, then custom components defined by that module are exposed as `my_module.CUSTOM_COMPONENT_NAME`. Imported namespaces must be unique across a given importing module. If an import namespace matches the name of a built-in component namespace, such as `prometheus`, the built-in namespace is hidden from the importing module, and only components defined in the imported module may be used. diff --git a/docs/sources/reference/cli/run.md b/docs/sources/reference/cli/run.md index 9cb201d2b2..834ca4f47c 100644 --- a/docs/sources/reference/cli/run.md +++ b/docs/sources/reference/cli/run.md @@ -68,7 +68,7 @@ Components that are no longer defined in the configuration file after reloading All components managed by the component controller are reevaluated after reloading. -## Clustering (beta) +## Clustering The `--cluster.enabled` command-line argument starts {{< param "PRODUCT_ROOT_NAME" >}} in [clustering][] mode. The rest of the `--cluster.*` command-line flags can be used to configure how nodes discover and connect to one another. diff --git a/docs/sources/reference/components/prometheus.operator.probes.md b/docs/sources/reference/components/prometheus.operator.probes.md index 7347d18379..78ace361a9 100644 --- a/docs/sources/reference/components/prometheus.operator.probes.md +++ b/docs/sources/reference/components/prometheus.operator.probes.md @@ -273,4 +273,4 @@ Connecting some components may not be sensible or components may require further Refer to the linked documentation for more details. {{< /admonition >}} - \ No newline at end of file + diff --git a/docs/sources/reference/config-blocks/import.file.md b/docs/sources/reference/config-blocks/import.file.md index 60a6cee183..c7e66c9ff7 100644 --- a/docs/sources/reference/config-blocks/import.file.md +++ b/docs/sources/reference/config-blocks/import.file.md @@ -12,14 +12,18 @@ title: import.file {{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} -The `import.file` block imports custom components from a file and exposes them to the importer. +The `import.file` block imports custom components from a file or a directory and exposes them to the importer. `import.file` blocks must be given a label that determines the namespace where custom components are exposed. +Imported directories are treated as single modules to support composability. 
+This means you can define a custom component in one file and use it in a custom component defined in another file +in the same directory. + ## Usage ```river import.file "NAMESPACE" { - filename = FILENAME + filename = PATH_NAME } ``` @@ -27,11 +31,11 @@ import.file "NAMESPACE" { ## Arguments The following arguments are supported: -Name | Type | Description | Default | Required ------------------|------------|-----------------------------------------------------|--------------|--------- -`filename` | `string` | Path of the file on disk to watch. | | yes -`detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no -`poll_frequency` | `duration` | How often to poll for file changes. | `"1m"` | no +| Name | Type | Description | Default | Required | +| ---------------- | ---------- | --------------------------------------------------- | ------------ | -------- | +| `filename` | `string` | Path of the file or directory on disk to watch. | | yes | +| `detector` | `string` | Which file change detector to use (fsnotify, poll). | `"fsnotify"` | no | +| `poll_frequency` | `duration` | How often to poll for file changes. | `"1m"` | no | {{< docs/shared lookup="reference/components/local-file-arguments-text.md" source="alloy" version="" >}} @@ -40,6 +44,7 @@ Name | Type | Description This example imports a module from a file and instantiates a custom component from the import that adds two numbers: {{< collapse title="module.river" >}} + ```river declare "add" { argument "a" {} argument "b" {} export "sum" { value = argument.a.value + argument.b.value } } ``` + {{< /collapse >}} {{< collapse title="importer.river" >}} + ```river import.file "math" { filename = "module.river" } math.add "default" { a = 15 b = 45 } ``` + {{< /collapse >}} diff --git a/docs/sources/reference/config-blocks/import.git.md b/docs/sources/reference/config-blocks/import.git.md index f7b7f724b1..a9fb4f2987 100644 --- a/docs/sources/reference/config-blocks/import.git.md +++ b/docs/sources/reference/config-blocks/import.git.md @@ -3,15 +3,11 @@ aliases: - ./reference/config-blocks/import.git/ canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.git/ description: Learn about the import.git configuration block -labels: - stage: beta title: import.git --- # import.git -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - The `import.git` block imports custom components from a Git repository and exposes them to the importer. `import.git` blocks must be given a label that determines the namespace where custom components are exposed. @@ -41,7 +37,10 @@ You must set the `repository` attribute to a repository address that Git would r When provided, the `revision` attribute must be set to a valid branch, tag, or commit SHA within the repository. -You must set the `path` attribute to a path accessible from the repository's root, such as `FILE_NAME.river` or `FOLDER_NAME/FILE_NAME.river`. +You must set the `path` attribute to a path accessible from the repository's root. +It can be either a River file, such as `FILE_NAME.river` or `DIR_NAME/FILE_NAME.river`, or +a directory containing River files, such as `DIR_NAME`, or `.` if the River files are stored at the root +of the repository. If `pull_frequency` isn't `"0s"`, the Git repository is pulled for updates at the frequency specified. If it's set to `"0s"`, the Git repository is pulled once on init.
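For illustration only (this sketch is not part of the diff), the attributes described above can be combined in one block. It reuses the repository, revision, and path from the documentation's own example and adds an explicit `pull_frequency`:

```river
import.git "math" {
  repository     = "https://github.com/wildum/module.git"
  revision       = "master"
  path           = "modules"
  pull_frequency = "15m"
}
```

With this configuration, the repository is cloned on init and then pulled for updates every 15 minutes; setting `pull_frequency = "0s"` instead would pull it only once.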
@@ -89,5 +88,20 @@ math.add "default" { } ``` +This example imports custom components from a directory in a Git repository and uses a custom component to add two numbers: + +```river +import.git "math" { + repository = "https://github.com/wildum/module.git" + revision = "master" + path = "modules" +} + +math.add "default" { + a = 15 + b = 45 +} +``` + [basic_auth]: #basic_auth-block [ssh_key]: #ssh_key-block diff --git a/docs/sources/reference/config-blocks/import.http.md b/docs/sources/reference/config-blocks/import.http.md index c788166f81..397ec9fd49 100644 --- a/docs/sources/reference/config-blocks/import.http.md +++ b/docs/sources/reference/config-blocks/import.http.md @@ -3,15 +3,11 @@ aliases: - ./reference/config-blocks/import.http/ canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.http/ description: Learn about the import.http configuration block -labels: - stage: beta title: import.http --- # import.http -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - `import.http` retrieves a module from an HTTP server. ## Usage diff --git a/docs/sources/reference/config-blocks/import.string.md b/docs/sources/reference/config-blocks/import.string.md index 8259a11b3c..bd7d8a0344 100644 --- a/docs/sources/reference/config-blocks/import.string.md +++ b/docs/sources/reference/config-blocks/import.string.md @@ -3,15 +3,11 @@ aliases: - ./reference/config-blocks/import.string/ canonical: https://grafana.com/docs/alloy/latest/reference/config-blocks/import.string/ description: Learn about the import.string configuration block -labels: - stage: beta title: import.string --- # import.string -{{< docs/shared lookup="stability/beta.md" source="alloy" version="" >}} - The `import.string` block imports custom components from a string and exposes them to the importer. `import.string` blocks must be given a label that determines the namespace where custom components are exposed. diff --git a/docs/sources/reference/config-blocks/tracing.md b/docs/sources/reference/config-blocks/tracing.md index 19bdcc28cc..97cedb47f9 100644 --- a/docs/sources/reference/config-blocks/tracing.md +++ b/docs/sources/reference/config-blocks/tracing.md @@ -82,8 +82,7 @@ Name | Type | Description `max_operations` | `number` | Limit number of operations which can have custom sampling. | `256` | no `refresh_interval` | `duration` | Frequency to poll the URL for new sampling strategies. | `"1m"` | no -The remote sampling strategies are retrieved from the URL specified by the `url` argument, and polled for updates on a timer. -The frequency for how often polling occurs is controlled by the `refresh_interval` argument. +The remote sampling strategies are retrieved from the URL specified by the `url` argument, and polled for updates on a timer. The frequency for how often polling occurs is controlled by the `refresh_interval` argument. Name | Type | Description | Default | Required -------------------|----------------|------------------------------------------------------------------|---------|--------- diff --git a/docs/sources/tasks/configure-agent-clustering.md b/docs/sources/tasks/configure-agent-clustering.md index 024f8a5392..48814033f9 100644 --- a/docs/sources/tasks/configure-agent-clustering.md +++ b/docs/sources/tasks/configure-agent-clustering.md @@ -10,12 +10,6 @@ weight: 400 You can configure {{< param "PRODUCT_NAME" >}} to run with [clustering][] so that individual {{< param "PRODUCT_ROOT_NAME" >}}s can work together for workload distribution and high availability. 
-{{< admonition type="note" >}} -Clustering is a [beta][] feature. Beta features are subject to breaking changes and may be replaced with equivalent functionality that covers the same use case. - -[beta]: ../../stability/#beta -{{< /admonition >}} - ## Before you begin - Familiarize yourself with how to [configure][] existing {{< param "PRODUCT_NAME" >}} installations. diff --git a/docs/sources/tasks/metamonitoring.md b/docs/sources/tasks/metamonitoring.md new file mode 100644 index 0000000000..ecac92e0f5 --- /dev/null +++ b/docs/sources/tasks/metamonitoring.md @@ -0,0 +1,154 @@ +--- +canonical: https://grafana.com/docs/alloy/latest/tasks/setup-metamonitoring/ +description: Learn how to set up meta-monitoring for Grafana Alloy +title: Set up meta-monitoring +weight: 200 +--- + +# Set up meta-monitoring + +You can configure {{< param "PRODUCT_NAME" >}} to collect its own telemetry and forward it to the backend of your choosing. + +This topic describes how to collect and forward {{< param "PRODUCT_NAME" >}}'s metrics, logs, and traces. + +## Components and configuration blocks used in this topic + +* [prometheus.exporter.self][] +* [prometheus.scrape][] +* [logging][] +* [tracing][] + +## Before you begin + +* Identify where to send {{< param "PRODUCT_NAME" >}}'s telemetry data. +* Be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}. + +## Meta-monitoring metrics + +{{< param "PRODUCT_NAME" >}} exposes its internal metrics using the Prometheus exposition format. + +In this task, you will use the [prometheus.exporter.self][] and [prometheus.scrape][] components to scrape {{< param "PRODUCT_NAME" >}}'s internal metrics and forward them to compatible {{< param "PRODUCT_NAME" >}} components. + +1. Add the following `prometheus.exporter.self` component to your configuration. The component accepts no arguments. + + ```river + prometheus.exporter.self "<SELF_LABEL>" { + } + ``` + +1. Add the following `prometheus.scrape` component to your configuration file. + ```river + prometheus.scrape "<SCRAPE_LABEL>" { + targets = prometheus.exporter.self.<SELF_LABEL>.targets + forward_to = [<COMPONENT_LIST>] + } + ``` + + Replace the following: + - _`<SELF_LABEL>`_: The label for the component, such as `default` or `metamonitoring`. The label must be unique across all `prometheus.exporter.self` components in the same configuration file. - _`<SCRAPE_LABEL>`_: The label for the scrape component, such as `default`. The label must be unique across all `prometheus.scrape` components in the same configuration file. + - _`<COMPONENT_LIST>`_: A comma-delimited list of component receivers to forward metrics to. + For example, to send to an existing remote write component, use `prometheus.remote_write.WRITE_LABEL.receiver`. + Similarly, to send data to an existing relabeling component, use `prometheus.relabel.PROCESS_LABEL.receiver`. + To use data in the OTLP format, you can send data to an existing converter component, like `otelcol.receiver.prometheus.OTEL.receiver`. + +The following example demonstrates configuring a possible sequence of components. + +```river +prometheus.exporter.self "default" { +} + +prometheus.scrape "metamonitoring" { + targets = prometheus.exporter.self.default.targets + forward_to = [prometheus.remote_write.default.receiver] +} + +prometheus.remote_write "default" { + endpoint { + url = "http://mimir:9009/api/v1/push" + } +} +``` + +## Meta-monitoring logs + +The [logging][] block defines the logging behavior of {{< param "PRODUCT_NAME" >}}. + +In this task, you will use the [logging][] block to forward {{< param "PRODUCT_NAME" >}}'s logs to a compatible component. +The block is specified without a label and can only be provided once per configuration file. + +1. Add the following `logging` configuration block to the top level of your configuration file. + + ```river + logging { + level = "<LOG_LEVEL>" + format = "<LOG_FORMAT>" + write_to = [<COMPONENT_LIST>] + } + ``` + + Replace the following: + - _`<LOG_LEVEL>`_: The log level to use for {{< param "PRODUCT_NAME" >}}'s logs. If the attribute isn't set, it defaults to `info`. + - _`<LOG_FORMAT>`_: The log format to use for {{< param "PRODUCT_NAME" >}}'s logs. If the attribute isn't set, it defaults to `logfmt`. + - _`<COMPONENT_LIST>`_: A comma-delimited list of component receivers to forward logs to. + For example, to send to an existing processing component, use `loki.process.PROCESS_LABEL.receiver`. + Similarly, to send data to an existing relabeling component, use `loki.relabel.PROCESS_LABEL.receiver`. + To use data in the OTLP format, you can send data to an existing converter component, like `otelcol.receiver.loki.OTEL.receiver`. + +The following example demonstrates configuring the logging block and sending to a compatible component. + +```river +logging { + level = "warn" + format = "json" + write_to = [loki.write.default.receiver] +} + +loki.write "default" { + endpoint { + url = "http://loki:3100/loki/api/v1/push" + } +} +``` + +## Meta-monitoring traces + +The [tracing][] block defines the tracing behavior of {{< param "PRODUCT_NAME" >}}. + +In this task, you will use the [tracing][] block to forward {{< param "PRODUCT_NAME" >}}'s internal traces to a compatible component. The block is specified without a label and can only be provided once per configuration file. + +1. Add the following `tracing` configuration block to the top level of your configuration file. + + ```river + tracing { + sampling_fraction = <SAMPLING_FRACTION> + write_to = [<COMPONENT_LIST>] + } + ``` + + Replace the following: + - _`<SAMPLING_FRACTION>`_: The fraction of traces to keep. If the attribute isn't set, it defaults to `0.1`. + - _`<COMPONENT_LIST>`_: A comma-delimited list of component receivers to forward traces to. + For example, to send to an existing OpenTelemetry exporter component, use `otelcol.exporter.otlp.EXPORT_LABEL.input`. + +The following example demonstrates configuring the tracing block and sending to a compatible component.
+ +```river +tracing { + sampling_fraction = 0.1 + write_to = [otelcol.exporter.otlp.default.input] +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "tempo:4317" + } +} +``` + +[prometheus.exporter.self]: ../../reference/components/prometheus.exporter.self +[prometheus.scrape]: ../../reference/components/prometheus.scrape +[logging]: ../../reference/config-blocks/logging +[tracing]: ../../reference/config-blocks/tracing +[Components]: ../../concepts/components diff --git a/docs/sources/tasks/migrate/from-operator.md b/docs/sources/tasks/migrate/from-operator.md index f266e1ce8d..48b0af6cca 100644 --- a/docs/sources/tasks/migrate/from-operator.md +++ b/docs/sources/tasks/migrate/from-operator.md @@ -199,6 +199,13 @@ discovery.relabel "pod_logs" { replacement = "/var/log/pods/*$1/*.log" target_label = "__path__" } + rule { + action = "replace" + source_labels = ["__meta_kubernetes_pod_container_id"] + regex = "^(\\w+):\\/\\/.+$" + replacement = "$1" + target_label = "tmp_container_runtime" + } } local.file_match "pod_logs" { diff --git a/docs/sources/tutorials/flow-by-example/get-started.md b/docs/sources/tutorials/flow-by-example/get-started.md index 93d3fc0285..15bfbea870 100644 --- a/docs/sources/tutorials/flow-by-example/get-started.md +++ b/docs/sources/tutorials/flow-by-example/get-started.md @@ -7,29 +7,21 @@ weight: 10 ## Who is this for? -This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][alloy]. -It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and telemetry collection in general. -It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. -It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts. +This set of tutorials contains a collection of examples that build on each other to demonstrate how to configure and use [{{< param "PRODUCT_NAME" >}}][alloy]. It assumes you have a basic understanding of what {{< param "PRODUCT_ROOT_NAME" >}} is and telemetry collection in general. It also assumes a base level of familiarity with Prometheus and PromQL, Loki and LogQL, and basic Grafana navigation. It assumes no knowledge of {{< param "PRODUCT_NAME" >}} or River concepts. ## What is {{% param "PRODUCT_NAME" %}}? -{{< param "PRODUCT_NAME" >}} uses a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. -It is built on top of the [River][] configuration language, which is designed to be fast, simple, and debuggable. +{{< param "PRODUCT_NAME" >}} uses a declarative configuration language that allows you to define a pipeline of telemetry collection, processing, and output. It is built on top of the [River][] configuration language, which is designed to be fast, simple, and debuggable. ## What do I need to get started? -You will need a Linux or Unix environment with Docker installed. -The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. -You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself. +You will need a Linux or Unix environment with Docker installed. The examples are designed to be run on a single host so that you can run them on your laptop or in a VM. You are encouraged to follow along with the examples using a `config.river` file and experiment with the examples yourself. 
-To run the examples, you should have a {{< param "PRODUCT_NAME" >}} binary available. -You can follow the instructions on how to [Install {{< param "PRODUCT_NAME" >}} as a Standalone Binary][install] to get a binary. +To run the examples, you should have a {{< param "PRODUCT_NAME" >}} binary available. You can follow the instructions on how to [Install {{< param "PRODUCT_NAME" >}} as a Standalone Binary][install] to get a binary. ## How should I follow along? -You can use this Docker-compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. -The examples are designed to be run locally, so you can follow along and experiment with them yourself. +You can use this Docker Compose file to set up a local Grafana instance alongside Loki and Prometheus pre-configured as datasources. The examples are designed to be run locally, so you can follow along and experiment with them yourself. ```yaml version: '3' @@ -85,12 +77,9 @@ services: After running `docker-compose up`, open [http://localhost:3000](http://localhost:3000) in your browser to view the Grafana UI. -The tutorials are designed to be followed in order and generally build on each other. -Each example explains what it does and how it works. -They are designed to be run locally, so you can follow along and experiment with them yourself. +The tutorials are designed to be followed in order and generally build on each other. Each example explains what it does and how it works. They are designed to be run locally, so you can follow along and experiment with them yourself. -The Recommended Reading sections in each tutorial provide a list of documentation topics. -To help you understand the concepts used in the example, read the recommended topics in the order given. +The Recommended Reading sections in each tutorial provide a list of documentation topics. To help you understand the concepts used in the example, read the recommended topics in the order given. 
[alloy]: https://grafana.com/docs/alloy/latest/ [River]: https://github.com/grafana/river diff --git a/go.mod b/go.mod index 23368dd02b..6a989840fa 100644 --- a/go.mod +++ b/go.mod @@ -13,12 +13,11 @@ require ( github.com/PuerkitoBio/rehttp v1.1.0 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.45.25 + github.com/aws/aws-sdk-go v1.45.25 // indirect github.com/aws/aws-sdk-go-v2 v1.25.0 github.com/aws/aws-sdk-go-v2/config v1.27.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 github.com/bmatcuk/doublestar v1.3.4 - github.com/buger/jsonparser v1.1.1 github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4 github.com/cespare/xxhash/v2 v2.2.0 github.com/cilium/ebpf v0.12.3 // indirect @@ -280,7 +279,7 @@ require ( github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect @@ -606,6 +605,7 @@ require ( github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 github.com/grafana/agent-remote-config v0.0.2 github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 + github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d github.com/natefinch/atomic v1.0.1 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 @@ -645,7 +645,6 @@ require ( github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/grafana/jfr-parser v0.8.0 // indirect - github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d // indirect github.com/hetznercloud/hcloud-go/v2 v2.4.0 // indirect github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect diff --git a/go.sum b/go.sum index 93cd5ab33b..4c686d9ee2 100644 --- a/go.sum +++ b/go.sum @@ -424,8 +424,6 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boynux/squid-exporter v1.10.5-0.20230618153315-c1fae094e18e h1:C1vYe728vM2FpXaICJuDRt5zgGyRdMmUGYnVfM7WcLY= github.com/boynux/squid-exporter v1.10.5-0.20230618153315-c1fae094e18e/go.mod h1:8NpZERGK+R9DGuZqqsKfnf2qI/rh7yBT8End29IvgNA= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4 h1:dgjwrjeVe90AeMhrx04TmDKjZe7xqKKEUxT3QKNx9RU= github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4/go.mod h1:aRr7CZ/KleZpcDkQVsNeXE1BFT3xRG8baUHJ7J+j8NI= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= diff --git a/internal/component/common/loki/client/batch.go b/internal/component/common/loki/client/batch.go index c47d60ee1b..7a15927cee 100644 --- 
a/internal/component/common/loki/client/batch.go +++ b/internal/component/common/loki/client/batch.go @@ -61,7 +61,7 @@ func newBatch(maxStreams int, entries ...loki.Entry) *batch { // add an entry to the batch func (b *batch) add(entry loki.Entry) error { - b.totalBytes += len(entry.Line) + b.totalBytes += entrySize(entry.Entry) // Append the entry to an already existing stream (if any) labels := labelsMapToString(entry.Labels, ReservedLabelTenantID) @@ -150,8 +150,8 @@ func (b *batch) sizeBytes() int { // sizeBytesAfter returns the size of the batch after the input entry // is added to the batch itself -func (b *batch) sizeBytesAfter(line string) int { - return b.totalBytes + len(line) +func (b *batch) sizeBytesAfter(entry logproto.Entry) int { + return b.totalBytes + entrySize(entry) } // age of the batch since its creation @@ -201,3 +201,11 @@ func (b *batch) reportAsSentData(h SentDataMarkerHandler) { h.UpdateSentData(seg, data) } } + +// entrySize returns the number of bytes an entry contributes to a batch, +// counting both the log line and any structured metadata. +func entrySize(entry logproto.Entry) int { + structuredMetadataSize := 0 + for _, label := range entry.StructuredMetadata { + structuredMetadataSize += label.Size() + } + return len(entry.Line) + structuredMetadataSize +} diff --git a/internal/component/common/loki/client/batch_test.go b/internal/component/common/loki/client/batch_test.go index db27075800..255c0d38dc 100644 --- a/internal/component/common/loki/client/batch_test.go +++ b/internal/component/common/loki/client/batch_test.go @@ -57,8 +57,9 @@ func TestBatch_add(t *testing.T) { inputEntries: []loki.Entry{ {Labels: model.LabelSet{}, Entry: logEntries[0].Entry}, {Labels: model.LabelSet{}, Entry: logEntries[1].Entry}, + {Labels: model.LabelSet{}, Entry: logEntries[7].Entry}, }, - expectedSizeBytes: len(logEntries[0].Entry.Line) + len(logEntries[1].Entry.Line), + expectedSizeBytes: entrySize(logEntries[0].Entry) + entrySize(logEntries[1].Entry) + entrySize(logEntries[7].Entry), }, "multiple streams with multiple log entries": { inputEntries: []loki.Entry{ diff --git a/internal/component/common/loki/client/client.go b/internal/component/common/loki/client/client.go index b3726cd6f3..8f127d9a73 100644 --- a/internal/component/common/loki/client/client.go +++ b/internal/component/common/loki/client/client.go @@ -166,9 +166,6 @@ type Tripperware func(http.RoundTripper) http.RoundTripper // New makes a new Client.
func New(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger) (Client, error) { - if cfg.StreamLagLabels.String() != "" { - return nil, fmt.Errorf("client config stream_lag_labels is deprecated and the associated metric has been removed, stream_lag_labels: %+v", cfg.StreamLagLabels.String()) - } return newClient(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger) } @@ -176,6 +173,9 @@ func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLin if cfg.URL.URL == nil { return nil, errors.New("client needs target URL") } + if metrics == nil { + return nil, errors.New("metrics must be instantiated") + } ctx, cancel := context.WithCancel(context.Background()) @@ -308,7 +308,7 @@ func (c *client) run() { // If adding the entry to the batch will increase the size over the max // size allowed, we do send the current batch and then create a new one - if batch.sizeBytesAfter(e.Line) > c.cfg.BatchSize { + if batch.sizeBytesAfter(e.Entry) > c.cfg.BatchSize { c.sendBatch(tenantID, batch) batches[tenantID] = newBatch(c.maxStreams, e) @@ -413,11 +413,10 @@ func (c *client) sendBatch(tenantID string, batch *batch) { func (c *client) send(ctx context.Context, tenantID string, buf []byte) (int, error) { ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout) defer cancel() - req, err := http.NewRequest("POST", c.cfg.URL.String(), bytes.NewReader(buf)) + req, err := http.NewRequestWithContext(ctx, "POST", c.cfg.URL.String(), bytes.NewReader(buf)) if err != nil { return -1, err } - req = req.WithContext(ctx) req.Header.Set("Content-Type", contentType) req.Header.Set("User-Agent", userAgent) diff --git a/internal/component/common/loki/client/client_test.go b/internal/component/common/loki/client/client_test.go index 4849562bdd..94f9e182e2 100644 --- a/internal/component/common/loki/client/client_test.go +++ b/internal/component/common/loki/client/client_test.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/grafana/loki/pkg/push" + "github.com/go-kit/log" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" @@ -34,6 +36,16 @@ var logEntries = []loki.Entry{ {Labels: model.LabelSet{"__tenant_id__": "tenant-1"}, Entry: logproto.Entry{Timestamp: time.Unix(5, 0).UTC(), Line: "line5"}}, {Labels: model.LabelSet{"__tenant_id__": "tenant-2"}, Entry: logproto.Entry{Timestamp: time.Unix(6, 0).UTC(), Line: "line6"}}, {Labels: model.LabelSet{}, Entry: logproto.Entry{Timestamp: time.Unix(6, 0).UTC(), Line: "line0123456789"}}, + { + Labels: model.LabelSet{}, + Entry: logproto.Entry{ + Timestamp: time.Unix(7, 0).UTC(), + Line: "line7", + StructuredMetadata: push.LabelsAdapter{ + {Name: "trace_id", Value: "12345"}, + }, + }, + }, } func TestClient_Handle(t *testing.T) { diff --git a/internal/component/common/loki/client/config.go b/internal/component/common/loki/client/config.go index df684d3c3a..1d5cf98fb0 100644 --- a/internal/component/common/loki/client/config.go +++ b/internal/component/common/loki/client/config.go @@ -45,8 +45,6 @@ type Config struct { // prevent HOL blocking in multitenant deployments. 
DropRateLimitedBatches bool `yaml:"drop_rate_limited_batches"` - StreamLagLabels flagext.StringSliceCSV `yaml:"stream_lag_labels" doc:"deprecated"` - // Queue controls configuration parameters specific to the queue client Queue QueueConfig } diff --git a/internal/component/common/loki/client/queue_client.go b/internal/component/common/loki/client/queue_client.go index 4f6bef989b..51f14b91c6 100644 --- a/internal/component/common/loki/client/queue_client.go +++ b/internal/component/common/loki/client/queue_client.go @@ -182,9 +182,6 @@ type queueClient struct { // NewQueue creates a new queueClient. func NewQueue(metrics *Metrics, queueClientMetrics *QueueClientMetrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger, markerHandler MarkerHandler) (StoppableWriteTo, error) { - if cfg.StreamLagLabels.String() != "" { - return nil, fmt.Errorf("client config stream_lag_labels is deprecated and the associated metric has been removed, stream_lag_labels: %+v", cfg.StreamLagLabels.String()) - } return newQueueClient(metrics, queueClientMetrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger, markerHandler) } @@ -344,7 +341,7 @@ func (c *queueClient) appendSingleEntry(segmentNum int, lbs model.LabelSet, e lo // If adding the entry to the batch will increase the size over the max // size allowed, we do send the current batch and then create a new one - if batch.sizeBytesAfter(e.Line) > c.cfg.BatchSize { + if batch.sizeBytesAfter(e) > c.cfg.BatchSize { c.sendQueue.enqueue(queuedBatch{ TenantID: tenantID, Batch: batch, diff --git a/internal/component/discovery/aws/ec2.go b/internal/component/discovery/aws/ec2.go index 2ce3b18090..7f8deb3693 100644 --- a/internal/component/discovery/aws/ec2.go +++ b/internal/component/discovery/aws/ec2.go @@ -1,11 +1,12 @@ package aws import ( + "context" "errors" "time" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" @@ -84,16 +85,18 @@ func (args *EC2Arguments) SetToDefault() { // Validate implements river.Validator. 
func (args *EC2Arguments) Validate() error { if args.Region == "" { - sess, err := session.NewSession() + cfgCtx := context.TODO() + cfg, err := awsConfig.LoadDefaultConfig(cfgCtx) if err != nil { return err } - metadata := ec2metadata.New(sess) - region, err := metadata.Region() + + client := imds.NewFromConfig(cfg) + region, err := client.GetRegion(cfgCtx, &imds.GetRegionInput{}) if err != nil { return errors.New("EC2 SD configuration requires a region") } - args.Region = region + args.Region = region.Region } for _, f := range args.Filters { if len(f.Values) == 0 { diff --git a/internal/component/discovery/aws/lightsail.go b/internal/component/discovery/aws/lightsail.go index 786cab3d07..fa9c76a6ef 100644 --- a/internal/component/discovery/aws/lightsail.go +++ b/internal/component/discovery/aws/lightsail.go @@ -1,11 +1,12 @@ package aws import ( + "context" "errors" "time" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/component/common/config" "github.com/grafana/agent/internal/component/discovery" @@ -71,16 +72,18 @@ func (args *LightsailArguments) SetToDefault() { // Validate implements river.Validator. func (args *LightsailArguments) Validate() error { if args.Region == "" { - sess, err := session.NewSession() + cfgCtx := context.TODO() + cfg, err := awsConfig.LoadDefaultConfig(cfgCtx) if err != nil { return err } - metadata := ec2metadata.New(sess) - region, err := metadata.Region() + + client := imds.NewFromConfig(cfg) + region, err := client.GetRegion(cfgCtx, &imds.GetRegionInput{}) if err != nil { return errors.New("Lightsail SD configuration requires a region") } - args.Region = region + args.Region = region.Region } return nil } diff --git a/internal/component/discovery/consulagent/promtail_consulagent.go b/internal/component/discovery/consulagent/promtail_consulagent.go index 77e98d4c4f..2e5d92b336 100644 --- a/internal/component/discovery/consulagent/promtail_consulagent.go +++ b/internal/component/discovery/consulagent/promtail_consulagent.go @@ -511,9 +511,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr // since the service may be registered remotely through a different node. var addr string if srvCheck.Service.Address != "" { - addr = net.JoinHostPort(srvCheck.Service.Address, fmt.Sprintf("%d", srvCheck.Service.Port)) + addr = net.JoinHostPort(srvCheck.Service.Address, strconv.Itoa(srvCheck.Service.Port)) } else { - addr = net.JoinHostPort(member.Addr, fmt.Sprintf("%d", srvCheck.Service.Port)) + addr = net.JoinHostPort(member.Addr, strconv.Itoa(srvCheck.Service.Port)) } labels := model.LabelSet{ @@ -544,7 +544,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr // Add all key/value pairs from the service's tagged addresses as their own labels. 
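The EC2 and Lightsail validators above make the same SDK v1 to v2 move: session.NewSession plus ec2metadata is replaced by config.LoadDefaultConfig and the dedicated imds client, whose GetRegion returns a struct rather than a bare string. A standalone sketch of that pattern; the timeout and fallback error message are illustrative choices, not the converted code:

```go
package awsregion

import (
	"context"
	"errors"
	"time"

	awsConfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func discoverRegion() (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// LoadDefaultConfig resolves settings from the usual chain: environment
	// variables, shared config files, and instance role credentials.
	cfg, err := awsConfig.LoadDefaultConfig(ctx)
	if err != nil {
		return "", err
	}

	// The v2 SDK exposes the instance metadata service as its own client;
	// GetRegion replaces the v1 ec2metadata.New(sess).Region() call.
	out, err := imds.NewFromConfig(cfg).GetRegion(ctx, &imds.GetRegionInput{})
	if err != nil {
		return "", errors.New("no region configured and none discoverable from instance metadata")
	}
	return out.Region, nil
}
```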
for k, v := range srvCheck.Service.TaggedAddresses { name := strutil.SanitizeLabelName(k) - address := fmt.Sprintf("%s:%d", v.Address, v.Port) + address := net.JoinHostPort(v.Address, strconv.Itoa(v.Port)) labels[taggedAddressesLabel+model.LabelName(name)] = model.LabelValue(address) } diff --git a/internal/component/local/file/file.go b/internal/component/local/file/file.go index ae2ed385ea..0655eea7c2 100644 --- a/internal/component/local/file/file.go +++ b/internal/component/local/file/file.go @@ -12,6 +12,7 @@ import ( "github.com/grafana/agent/internal/component" "github.com/grafana/agent/internal/featuregate" + filedetector "github.com/grafana/agent/internal/filedetector" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/river/rivertypes" ) @@ -41,7 +42,7 @@ type Arguments struct { // Filename indicates the file to watch. Filename string `river:"filename,attr"` // Type indicates how to detect changes to the file. - Type Detector `river:"detector,attr,optional"` + Type filedetector.Detector `river:"detector,attr,optional"` // PollFrequency determines the frequency to check for changes when Type is // Poll. PollFrequency time.Duration `river:"poll_frequency,attr,optional"` @@ -53,7 +54,7 @@ type Arguments struct { // DefaultArguments provides the default arguments for the local.file // component. var DefaultArguments = Arguments{ - Type: DetectorFSNotify, + Type: filedetector.DetectorFSNotify, PollFrequency: time.Minute, } @@ -237,14 +238,14 @@ func (c *Component) configureDetector() error { } switch c.args.Type { - case DetectorPoll: - c.detector = newPoller(pollerOptions{ + case filedetector.DetectorPoll: + c.detector = filedetector.NewPoller(filedetector.PollerOptions{ Filename: c.args.Filename, ReloadFile: reloadFile, PollFrequency: c.args.PollFrequency, }) - case DetectorFSNotify: - c.detector, err = newFSNotify(fsNotifyOptions{ + case filedetector.DetectorFSNotify: + c.detector, err = filedetector.NewFSNotify(filedetector.FSNotifyOptions{ Logger: c.opts.Logger, Filename: c.args.Filename, ReloadFile: reloadFile, diff --git a/internal/component/local/file/file_test.go b/internal/component/local/file/file_test.go index 6f0344b7c4..304fb82937 100644 --- a/internal/component/local/file/file_test.go +++ b/internal/component/local/file/file_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/grafana/agent/internal/component/local/file" + filedetector "github.com/grafana/agent/internal/filedetector" "github.com/grafana/agent/internal/flow/componenttest" "github.com/grafana/river/rivertypes" "github.com/stretchr/testify/require" @@ -16,16 +17,16 @@ import ( func TestFile(t *testing.T) { t.Run("Polling change detector", func(t *testing.T) { - runFileTests(t, file.DetectorPoll) + runFileTests(t, filedetector.DetectorPoll) }) t.Run("Event change detector", func(t *testing.T) { - runFileTests(t, file.DetectorFSNotify) + runFileTests(t, filedetector.DetectorFSNotify) }) } // runFileTests will run a suite of tests with the configured update type. 
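The consulagent edits just above are a correctness fix, not a style one: fmt.Sprintf("%s:%d", ...) yields an ambiguous address when the host is an IPv6 literal, while net.JoinHostPort brackets it. A tiny runnable illustration:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	host, port := "2001:db8::1", 8500

	// The naive form cannot be split back into host and port:
	fmt.Printf("%s:%d\n", host, port) // 2001:db8::1:8500

	// JoinHostPort produces a form net.SplitHostPort can parse:
	fmt.Println(net.JoinHostPort(host, strconv.Itoa(port))) // [2001:db8::1]:8500
}
```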
-func runFileTests(t *testing.T, ut file.Detector) { +func runFileTests(t *testing.T, ut filedetector.Detector) { newSuiteController := func(t *testing.T, filename string) *componenttest.Controller { require.NoError(t, os.WriteFile(filename, []byte("First load!"), 0664)) @@ -100,7 +101,7 @@ func TestFile_ImmediateExports(t *testing.T) { go func() { err := tc.Run(componenttest.TestContext(t), file.Arguments{ Filename: testFile, - Type: file.DetectorPoll, + Type: filedetector.DetectorPoll, PollFrequency: 1 * time.Hour, }) require.NoError(t, err) @@ -125,7 +126,7 @@ func TestFile_ExistOnLoad(t *testing.T) { err = tc.Run(canceledContext(), file.Arguments{ Filename: testFile, - Type: file.DetectorPoll, + Type: filedetector.DetectorPoll, PollFrequency: 1 * time.Hour, }) diff --git a/internal/component/local/file_match/file_test.go b/internal/component/local/file_match/file_test.go index 913a1fe680..43795b6f6c 100644 --- a/internal/component/local/file_match/file_test.go +++ b/internal/component/local/file_match/file_test.go @@ -116,6 +116,43 @@ func TestAddingFileInSubDir(t *testing.T) { require.True(t, contains(foundFiles, "t3.txt")) } +func TestAddingFileInAnExcludedSubDir(t *testing.T) { + dir := path.Join(os.TempDir(), "agent_testing", "t3") + os.MkdirAll(dir, 0755) + writeFile(t, dir, "t1.txt") + t.Cleanup(func() { + os.RemoveAll(dir) + }) + included := []string{path.Join(dir, "**", "*.txt")} + excluded := []string{path.Join(dir, "subdir", "*.txt")} + c := createComponent(t, dir, included, excluded) + ct := context.Background() + ct, ccl := context.WithTimeout(ct, 40*time.Second) + defer ccl() + c.args.SyncPeriod = 10 * time.Millisecond + go c.Run(ct) + time.Sleep(20 * time.Millisecond) + writeFile(t, dir, "t2.txt") + subdir := path.Join(dir, "subdir") + os.Mkdir(subdir, 0755) + subdir2 := path.Join(dir, "subdir2") + os.Mkdir(subdir2, 0755) + time.Sleep(20 * time.Millisecond) + // This file will not be included, since it is in the excluded subdir + err := os.WriteFile(path.Join(subdir, "exclude_me.txt"), []byte("asdf"), 0664) + require.NoError(t, err) + // This file will be included, since it is in another subdir + err = os.WriteFile(path.Join(subdir2, "another.txt"), []byte("asdf"), 0664) + require.NoError(t, err) + time.Sleep(20 * time.Millisecond) + ct.Done() + foundFiles := c.getWatchedFiles() + require.Len(t, foundFiles, 3) + require.True(t, contains(foundFiles, "t1.txt")) + require.True(t, contains(foundFiles, "t2.txt")) + require.True(t, contains(foundFiles, "another.txt")) +} + func TestAddingRemovingFileInSubDir(t *testing.T) { dir := path.Join(os.TempDir(), "agent_testing", "t3") os.MkdirAll(dir, 0755) @@ -201,23 +238,23 @@ func TestMultiLabels(t *testing.T) { require.True(t, contains([]discovery.Target{foundFiles[1]}, "t1.txt")) } +// createComponent creates a component with the given paths and labels. The paths and excluded slices are zipped together +// to create the set of targets to pass to the component. func createComponent(t *testing.T, dir string, paths []string, excluded []string) *Component { return createComponentWithLabels(t, dir, paths, excluded, nil) } +// createComponentWithLabels creates a component with the given paths and labels. The paths and excluded slices are +// zipped together to create the set of targets to pass to the component. 
func createComponentWithLabels(t *testing.T, dir string, paths []string, excluded []string, labels map[string]string) *Component { tPaths := make([]discovery.Target, 0) - for _, p := range paths { + for i, p := range paths { tar := discovery.Target{"__path__": p} for k, v := range labels { tar[k] = v } - tPaths = append(tPaths, tar) - } - for _, p := range excluded { - tar := discovery.Target{"__path_exclude__": p} - for k, v := range labels { - tar[k] = v + if i < len(excluded) { + tar["__path_exclude__"] = excluded[i] } tPaths = append(tPaths, tar) } diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server.go b/internal/component/loki/source/api/internal/lokipush/push_api_server.go index 1b73950cc6..1842522dfa 100644 --- a/internal/component/loki/source/api/internal/lokipush/push_api_server.go +++ b/internal/component/loki/source/api/internal/lokipush/push_api_server.go @@ -185,7 +185,9 @@ func (s *PushAPIServer) handleLoki(w http.ResponseWriter, r *http.Request) { e := loki.Entry{ Labels: filtered.Clone(), Entry: logproto.Entry{ - Line: entry.Line, + Line: entry.Line, + StructuredMetadata: entry.StructuredMetadata, + Parsed: entry.Parsed, }, } if keepTimestamp { diff --git a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go index 7e38572437..de68ffaf64 100644 --- a/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go +++ b/internal/component/loki/source/api/internal/lokipush/push_api_server_test.go @@ -23,6 +23,7 @@ import ( frelabel "github.com/grafana/agent/internal/component/common/relabel" "github.com/grafana/dskit/flagext" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/push" "github.com/grafana/river" "github.com/phayes/freeport" "github.com/prometheus/client_golang/prometheus" @@ -79,6 +80,10 @@ regex = "dropme" Entry: logproto.Entry{ Timestamp: time.Unix(int64(i), 0), Line: "line" + strconv.Itoa(i), + StructuredMetadata: push.LabelsAdapter{ + {Name: "i", Value: strconv.Itoa(i)}, + {Name: "anotherMetaData", Value: "val"}, + }, }, } } @@ -98,9 +103,18 @@ regex = "dropme" "pushserver": "pushserver1", "stream": "stream1", } + + expectedStructuredMetadata := push.LabelsAdapter{ + {Name: "i", Value: strconv.Itoa(0)}, + {Name: "anotherMetaData", Value: "val"}, + } + // Spot check the first value in the result to make sure relabel rules were applied properly require.Equal(t, expectedLabels, eh.Received()[0].Labels) + // Spot check the first value in the result to make sure structured metadata was received properly + require.Equal(t, expectedStructuredMetadata, eh.Received()[0].StructuredMetadata) + // With keep timestamp enabled, verify timestamp require.Equal(t, time.Unix(99, 0).Unix(), eh.Received()[99].Timestamp.Unix()) diff --git a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go index 33c860cf98..dfe0d9a181 100644 --- a/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go +++ b/internal/component/loki/source/cloudflare/internal/cloudflaretarget/target.go @@ -12,7 +12,6 @@ import ( "sync" "time" - "github.com/buger/jsonparser" "github.com/go-kit/log" "github.com/grafana/agent/internal/component/common/loki" "github.com/grafana/agent/internal/component/common/loki/positions" @@ -21,6 +20,7 @@ import ( "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/concurrency" 
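The reworked helper above zips the include and exclude globs positionally: element i of excluded, when present, rides on the same target as paths[i] instead of becoming a separate exclude-only target. A distilled sketch of that pairing, with Target standing in for the agent's discovery.Target map:

```go
package main

import "fmt"

// Target stands in for discovery.Target, a string-to-string label map.
type Target map[string]string

func zipTargets(paths, excluded []string) []Target {
	targets := make([]Target, 0, len(paths))
	for i, p := range paths {
		t := Target{"__path__": p}
		if i < len(excluded) {
			t["__path_exclude__"] = excluded[i] // paired by index
		}
		targets = append(targets, t)
	}
	return targets
}

func main() {
	fmt.Println(zipTargets(
		[]string{"/var/log/**/*.txt"},
		[]string{"/var/log/subdir/*.txt"},
	))
}
```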
"github.com/grafana/dskit/multierror" + "github.com/grafana/jsonparser" "github.com/grafana/loki/pkg/logproto" "github.com/prometheus/common/model" "go.uber.org/atomic" diff --git a/internal/component/loki/source/docker/internal/dockertarget/target_test.go b/internal/component/loki/source/docker/internal/dockertarget/target_test.go index 701d3f86f5..e32ecb1010 100644 --- a/internal/component/loki/source/docker/internal/dockertarget/target_test.go +++ b/internal/component/loki/source/docker/internal/dockertarget/target_test.go @@ -95,7 +95,7 @@ func TestDockerTarget(t *testing.T) { }, 5*time.Second, 100*time.Millisecond, "Expected log lines were not found within the time limit.") assert.EventuallyWithT(t, func(c *assert.CollectT) { - assert.False(t, tgt.Ready()) + assert.False(c, tgt.Ready()) }, 5*time.Second, 20*time.Millisecond, "Expected target to finish processing within the time limit.") entryHandler.Clear() diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go index 237cba2229..cc376f6a1f 100644 --- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go @@ -118,6 +118,17 @@ func convertAggregationTemporality(temporality string) (string, error) { } } +func FromOTelAggregationTemporality(temporality string) string { + switch temporality { + case "AGGREGATION_TEMPORALITY_DELTA": + return AggregationTemporalityDelta + case "AGGREGATION_TEMPORALITY_CUMULATIVE": + return AggregationTemporalityCumulative + default: + return "" + } +} + // Convert implements connector.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { dimensions := make([]spanmetricsconnector.Dimension, 0, len(args.Dimensions)) diff --git a/internal/component/remote/s3/watcher.go b/internal/component/remote/s3/watcher.go index 01fe5a5f00..f982703b66 100644 --- a/internal/component/remote/s3/watcher.go +++ b/internal/component/remote/s3/watcher.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "context" diff --git a/internal/converter/internal/otelcolconvert/converter.go b/internal/converter/internal/otelcolconvert/converter.go index a32ce391bc..4939289f8b 100644 --- a/internal/converter/internal/otelcolconvert/converter.go +++ b/internal/converter/internal/otelcolconvert/converter.go @@ -58,6 +58,9 @@ type state struct { // converterLookup maps a converter key to the associated converter instance. converterLookup map[converterKey]componentConverter + // extensionLookup maps OTel extensions to Flow component IDs. + extensionLookup map[component.ID]componentID + componentID component.InstanceID // ID of the current component being converted. componentConfig component.Config // Config of the current component being converted. } @@ -98,6 +101,9 @@ func (state *state) flowLabelForComponent(c component.InstanceID) string { // 3. There is no other mechanism which constructs an OpenTelemetry // receiver, processor, or exporter component. // + // 4. Extension components are created once per service and are agnostic to + // pipelines. + // // Considering the points above, the combination of group name and component // name is all that's needed to form a unique label for a single input // config. 
@@ -177,6 +183,14 @@ func (state *state) nextInstances(c component.InstanceID, dataType component.Dat } } +func (state *state) LookupExtension(id component.ID) componentID { + cid, ok := state.extensionLookup[id] + if !ok { + panic(fmt.Sprintf("no component name found for extension %q", id.Name())) + } + return cid +} + type componentID struct { Name []string Label string diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go new file mode 100644 index 0000000000..ac07fc6566 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -0,0 +1,48 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol/auth/basic" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/river/rivertypes" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, basicAuthConverterConverter{}) +} + +type basicAuthConverterConverter struct{} + +func (basicAuthConverterConverter) Factory() component.Factory { + return basicauthextension.NewFactory() +} + +func (basicAuthConverterConverter) InputComponentName() string { return "otelcol.auth.basic" } + +func (basicAuthConverterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toBasicAuthExtension(cfg.(*basicauthextension.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "auth", "basic"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toBasicAuthExtension(cfg *basicauthextension.Config) *basic.Arguments { + return &basic.Arguments{ + Username: cfg.ClientAuth.Username, + Password: rivertypes.Secret(string(cfg.ClientAuth.Password)), + } +} diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index 8c542951b7..92e14b4a1b 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -2,9 +2,11 @@ package otelcolconvert import ( "fmt" + "strings" "github.com/alecthomas/units" "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/component/otelcol/exporter/loadbalancing" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" @@ -30,9 +32,17 @@ func (loadbalancingExporterConverter) ConvertAndAppend(state *state, id componen var diags diag.Diagnostics label := state.FlowComponentLabel() + overrideHook := func(val interface{}) interface{} { + switch val.(type) { + case auth.Handler: + ext := state.LookupExtension(cfg.(*loadbalancingexporter.Config).Protocol.OTLP.Auth.AuthenticatorID) + return common.CustomTokenizer{Expr: fmt.Sprintf("%s.%s.handler", strings.Join(ext.Name, "."), ext.Label)} + } + return val + } args := toLoadbalancingExporter(cfg.(*loadbalancingexporter.Config)) - 
block := common.NewBlockWithOverride([]string{"otelcol", "exporter", "loadbalancing"}, label, args) + block := common.NewBlockWithOverrideFn([]string{"otelcol", "exporter", "loadbalancing"}, label, args, overrideHook) diags.Add( diag.SeverityLevelInfo, @@ -54,6 +64,10 @@ func toLoadbalancingExporter(cfg *loadbalancingexporter.Config) *loadbalancing.A } func toProtocol(cfg loadbalancingexporter.Protocol) loadbalancing.Protocol { + var a *auth.Handler + if cfg.OTLP.Auth != nil { + a = &auth.Handler{} + } return loadbalancing.Protocol{ // NOTE(rfratto): this has a lot of overlap with converting the // otlpexporter, but otelcol.exporter.loadbalancing uses custom types to @@ -75,7 +89,7 @@ func toProtocol(cfg loadbalancingexporter.Protocol) loadbalancing.Protocol { BalancerName: cfg.OTLP.BalancerName, Authority: cfg.OTLP.Authority, - // TODO(rfratto): handle auth + Auth: a, }, }, } diff --git a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go index 30b7c18ce5..8fbc4809a4 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go @@ -2,9 +2,11 @@ package otelcolconvert import ( "fmt" + "strings" "github.com/alecthomas/units" "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/auth" "github.com/grafana/agent/internal/component/otelcol/exporter/otlp" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" @@ -32,9 +34,17 @@ func (otlpExporterConverter) ConvertAndAppend(state *state, id component.Instanc var diags diag.Diagnostics label := state.FlowComponentLabel() + overrideHook := func(val interface{}) interface{} { + switch val.(type) { + case auth.Handler: + ext := state.LookupExtension(cfg.(*otlpexporter.Config).Auth.AuthenticatorID) + return common.CustomTokenizer{Expr: fmt.Sprintf("%s.%s.handler", strings.Join(ext.Name, "."), ext.Label)} + } + return val + } args := toOtelcolExporterOTLP(cfg.(*otlpexporter.Config)) - block := common.NewBlockWithOverride([]string{"otelcol", "exporter", "otlp"}, label, args) + block := common.NewBlockWithOverrideFn([]string{"otelcol", "exporter", "otlp"}, label, args, overrideHook) diags.Add( diag.SeverityLevelInfo, @@ -78,6 +88,10 @@ func toRetryArguments(cfg exporterhelper.RetrySettings) otelcol.RetryArguments { } func toGRPCClientArguments(cfg configgrpc.GRPCClientSettings) otelcol.GRPCClientArguments { + var a *auth.Handler + if cfg.Auth != nil { + a = &auth.Handler{} + } return otelcol.GRPCClientArguments{ Endpoint: cfg.Endpoint, @@ -93,7 +107,7 @@ func toGRPCClientArguments(cfg configgrpc.GRPCClientSettings) otelcol.GRPCClient BalancerName: cfg.BalancerName, Authority: cfg.Authority, - // TODO(rfratto): auth extension + Auth: a, } } diff --git a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go index 22a8f6555e..d978e89c6a 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go @@ -2,10 +2,12 @@ package otelcolconvert import ( "fmt" + "strings" "time" "github.com/alecthomas/units" "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/auth" 
"github.com/grafana/agent/internal/component/otelcol/exporter/otlphttp" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" @@ -32,9 +34,17 @@ func (otlpHTTPExporterConverter) ConvertAndAppend(state *state, id component.Ins var diags diag.Diagnostics label := state.FlowComponentLabel() + overrideHook := func(val interface{}) interface{} { + switch val.(type) { + case auth.Handler: + ext := state.LookupExtension(cfg.(*otlphttpexporter.Config).Auth.AuthenticatorID) + return common.CustomTokenizer{Expr: fmt.Sprintf("%s.%s.handler", strings.Join(ext.Name, "."), ext.Label)} + } + return val + } args := toOtelcolExporterOTLPHTTP(cfg.(*otlphttpexporter.Config)) - block := common.NewBlockWithOverride([]string{"otelcol", "exporter", "otlphttp"}, label, args) + block := common.NewBlockWithOverrideFn([]string{"otelcol", "exporter", "otlphttp"}, label, args, overrideHook) diags.Add( diag.SeverityLevelInfo, @@ -55,6 +65,11 @@ func toOtelcolExporterOTLPHTTP(cfg *otlphttpexporter.Config) *otlphttp.Arguments } func toHTTPClientArguments(cfg confighttp.HTTPClientSettings) otelcol.HTTPClientArguments { + var a *auth.Handler + if cfg.Auth != nil { + a = &auth.Handler{} + } + var mic *int var ict *time.Duration defaults := confighttp.NewDefaultHTTPClientSettings() @@ -79,6 +94,6 @@ func toHTTPClientArguments(cfg confighttp.HTTPClientSettings) otelcol.HTTPClient IdleConnTimeout: ict, DisableKeepAlives: cfg.DisableKeepAlives, - // TODO(@tpaschalis): auth extension + Auth: a, } } diff --git a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go new file mode 100644 index 0000000000..5b26f953e2 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go @@ -0,0 +1,103 @@ +package otelcolconvert + +import ( + "fmt" + "time" + + "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/connector/spanmetrics" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, spanmetricsConnectorConverter{}) +} + +type spanmetricsConnectorConverter struct{} + +func (spanmetricsConnectorConverter) Factory() component.Factory { + return spanmetricsconnector.NewFactory() +} + +func (spanmetricsConnectorConverter) InputComponentName() string { + return "otelcol.connector.spanmetrics" +} + +func (spanmetricsConnectorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toSpanmetricsConnector(state, id, cfg.(*spanmetricsconnector.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "connector", "spanmetrics"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toSpanmetricsConnector(state *state, id component.InstanceID, cfg *spanmetricsconnector.Config) *spanmetrics.Arguments { + if cfg == nil { + return nil + } + var ( + nextMetrics = state.Next(id, component.DataTypeMetrics) + ) + + var exponential 
*spanmetrics.ExponentialHistogramConfig + if cfg.Histogram.Exponential != nil { + exponential = &spanmetrics.ExponentialHistogramConfig{ + MaxSize: cfg.Histogram.Exponential.MaxSize, + } + } + + var explicit *spanmetrics.ExplicitHistogramConfig + if cfg.Histogram.Explicit != nil { + explicit = &spanmetrics.ExplicitHistogramConfig{ + Buckets: cfg.Histogram.Explicit.Buckets, + } + } + + // If none have been explicitly set, assign the upstream default. + if exponential == nil && explicit == nil { + explicit = &spanmetrics.ExplicitHistogramConfig{Buckets: []time.Duration{}} + explicit.SetToDefault() + } + + var dimensions []spanmetrics.Dimension + for _, d := range cfg.Dimensions { + dimensions = append(dimensions, spanmetrics.Dimension{ + Name: d.Name, + Default: d.Default, + }) + } + + return &spanmetrics.Arguments{ + Dimensions: dimensions, + ExcludeDimensions: cfg.ExcludeDimensions, + DimensionsCacheSize: cfg.DimensionsCacheSize, + AggregationTemporality: spanmetrics.FromOTelAggregationTemporality(cfg.AggregationTemporality), + Histogram: spanmetrics.HistogramConfig{ + Disable: cfg.Histogram.Disable, + Unit: cfg.Histogram.Unit.String(), + Exponential: exponential, + Explicit: explicit, + }, + MetricsFlushInterval: cfg.MetricsFlushInterval, + Namespace: cfg.Namespace, + Exemplars: spanmetrics.ExemplarsConfig{ + Enabled: cfg.Exemplars.Enabled, + }, + + Output: &otelcol.ConsumerArguments{ + Metrics: toTokenizedConsumers(nextMetrics), + }, + } +} diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index 768c9b3e5d..719887ac91 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strings" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" @@ -18,6 +19,7 @@ import ( "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" + "golang.org/x/exp/maps" ) // This package is split into a set of [componentConverter] implementations @@ -149,6 +151,19 @@ func appendConfig(file *builder.File, cfg *otelcol.Config) diag.Diagnostics { diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to interpret config: %s", err)) return diags } + // TODO(rfratto): should this be deduplicated to avoid creating factories + // twice? + converterTable := buildConverterTable() + + // Connector components are defined on the top level of the OpenTelemetry + // config, but inside of the pipeline definitions they act like regular + // receiver and exporter component IDs. + // Connector component instances must _always_ be used both as an exporter + // _and_ a receiver for the signal types they're supporting. + // + // Since we want to construct them individually, we'll exclude them from + // the list of receivers and exporters manually. + connectorIDs := maps.Keys(cfg.Connectors) // NOTE(rfratto): here, the same component ID will be instantiated once for // every group it's in. This means that converting receivers in multiple // This isn't a problem in pure OpenTelemetry Collector because it internally // deduplicates receiver instances, but since Flow doesn't have this logic we // need to reject these kinds of configs for now.
- if duplicateDiags := validateNoDuplicateReceivers(groups); len(duplicateDiags) > 0 { + if duplicateDiags := validateNoDuplicateReceivers(groups, connectorIDs); len(duplicateDiags) > 0 { diags.AddAll(duplicateDiags) return diags } - // TODO(rfratto): should this be deduplicated to avoid creating factories - // twice? - converterTable := buildConverterTable() + // We build the list of extensions 'activated' (defined in the service) as + // Flow components and keep a mapping of their OTel IDs to the blocks we've + // built. + // Since there's no concept of multiple extensions per group or telemetry + // signal, we can build them before iterating over the groups. + extensionTable := make(map[component.ID]componentID, len(cfg.Service.Extensions)) + + for _, ext := range cfg.Service.Extensions { + cid := component.InstanceID{Kind: component.KindExtension, ID: ext} + + state := &state{ + cfg: cfg, + file: file, + // We pass an empty pipelineGroup to make calls to + // FlowComponentLabel valid for both the converter authors and the + // extension table mapping. + group: &pipelineGroup{}, + + converterLookup: converterTable, + + componentConfig: cfg.Extensions, + componentID: cid, + } + + key := converterKey{Kind: component.KindExtension, Type: ext.Type()} + conv, ok := converterTable[key] + if !ok { + panic(fmt.Sprintf("otelcolconvert: no converter found for key %v", key)) + } + + diags.AddAll(conv.ConvertAndAppend(state, cid, cfg.Extensions[ext])) + + extensionTable[ext] = componentID{ + Name: strings.Split(conv.InputComponentName(), "."), + Label: state.FlowComponentLabel(), + } + } for _, group := range groups { + receiverIDs := filterIDs(group.Receivers(), connectorIDs) + processorIDs := group.Processors() + exporterIDs := filterIDs(group.Exporters(), connectorIDs) + componentSets := []struct { kind component.Kind ids []component.ID configLookup map[component.ID]component.Config }{ - {component.KindReceiver, group.Receivers(), cfg.Receivers}, - {component.KindProcessor, group.Processors(), cfg.Processors}, - {component.KindExporter, group.Exporters(), cfg.Exporters}, + {component.KindReceiver, receiverIDs, cfg.Receivers}, + {component.KindProcessor, processorIDs, cfg.Processors}, + {component.KindExporter, exporterIDs, cfg.Exporters}, + {component.KindConnector, connectorIDs, cfg.Connectors}, } for _, componentSet := range componentSets { @@ -188,6 +242,7 @@ func appendConfig(file *builder.File, cfg *otelcol.Config) diag.Diagnostics { group: &group, converterLookup: converterTable, + extensionLookup: extensionTable, componentConfig: componentSet.configLookup[id], componentID: componentID, @@ -211,13 +266,14 @@ func appendConfig(file *builder.File, cfg *otelcol.Config) diag.Diagnostics { // in two different pipeline groups. This is required because Flow does not // allow the same receiver to be instantiated more than once, while this is // fine in OpenTelemetry due to internal deduplication rules. 
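The filtering above leans on the filterIDs helper added near the end of this file's diff, which uses a nested scan. An equivalent set-based formulation of the same semantics may make the intent clearer; ID here stands in for OTel's component.ID, and this is a sketch rather than the PR's code:

```go
package example

type ID string

// filterIDs drops every ID in remove from in, preserving order. Connector
// IDs are removed from receiver and exporter lists because connectors are
// converted exactly once, under their own component kind.
func filterIDs(in, remove []ID) []ID {
	drop := make(map[ID]struct{}, len(remove))
	for _, id := range remove {
		drop[id] = struct{}{}
	}
	var out []ID
	for _, id := range in {
		if _, skip := drop[id]; !skip {
			out = append(out, id)
		}
	}
	return out
}
```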
-func validateNoDuplicateReceivers(groups []pipelineGroup) diag.Diagnostics { +func validateNoDuplicateReceivers(groups []pipelineGroup, connectorIDs []component.ID) diag.Diagnostics { var diags diag.Diagnostics usedReceivers := make(map[component.ID]struct{}) for _, group := range groups { - for _, receiver := range group.Receivers() { + receiverIDs := filterIDs(group.Receivers(), connectorIDs) + for _, receiver := range receiverIDs { if _, found := usedReceivers[receiver]; found { diags.Add(diag.SeverityLevelCritical, fmt.Sprintf( "the configuration is unsupported because the receiver %q is used across multiple pipelines with distinct names", @@ -246,6 +302,12 @@ func buildConverterTable() map[converterKey]componentConverter { table[converterKey{Kind: component.KindExporter, Type: fact.Type()}] = conv case connector.Factory: table[converterKey{Kind: component.KindConnector, Type: fact.Type()}] = conv + // We need this so the connector is available as a destination for state.Next + table[converterKey{Kind: component.KindExporter, Type: fact.Type()}] = conv + // Technically, this isn't required to be here since the entry + // won't be needed to look up a destination for state.Next, but + // we add it to reinforce the idea of how connectors are used. + table[converterKey{Kind: component.KindReceiver, Type: fact.Type()}] = conv case extension.Factory: table[converterKey{Kind: component.KindExtension, Type: fact.Type()}] = conv } @@ -253,3 +315,21 @@ return table } + +func filterIDs(in []component.ID, rem []component.ID) []component.ID { + var res []component.ID + + for _, set := range in { + exists := false + for _, id := range rem { + if set == id { + exists = true + } + } + if !exists { + res = append(res, set) + } + } + + return res +} diff --git a/internal/converter/internal/otelcolconvert/pipeline_group.go b/internal/converter/internal/otelcolconvert/pipeline_group.go index 85d84ae4e1..3c6f278aad 100644 --- a/internal/converter/internal/otelcolconvert/pipeline_group.go +++ b/internal/converter/internal/otelcolconvert/pipeline_group.go @@ -1,6 +1,7 @@ package otelcolconvert import ( + "cmp" "fmt" "go.opentelemetry.io/collector/component" @@ -95,7 +96,12 @@ func createPipelineGroups(cfg pipelines.Config) ([]pipelineGroup, error) { groups[key] = group } - return maps.Values(groups), nil + res := maps.Values(groups) + slices.SortStableFunc(res, func(a, b pipelineGroup) int { + return cmp.Compare(a.Name, b.Name) + }) + + return res, nil } // Receivers returns a set of unique IDs for receivers across all telemetry @@ -168,9 +174,9 @@ func (group pipelineGroup) NextTraces(fromID component.InstanceID) []component.I func nextInPipeline(pipeline *pipelines.PipelineConfig, fromID component.InstanceID) []component.InstanceID { switch fromID.Kind { - case component.KindReceiver: - // Receivers should either send to the first processor if one exists or to - // every exporter otherwise. + case component.KindReceiver, component.KindConnector: + // Receivers and connectors should either send to the first processor + // if one exists or to every exporter otherwise. if len(pipeline.Processors) > 0 { return []component.InstanceID{{Kind: component.KindProcessor, ID: pipeline.Processors[0]}} } @@ -180,10 +186,6 @@ func nextInPipeline(pipeline *pipelines.PipelineConfig, fromID component.Instanc // Processors should send to the next processor if one exists or to every // exporter otherwise.
processorIndex := slices.Index(pipeline.Processors, fromID.ID) - if processorIndex == -1 { - panic("nextInPipeline: received processor ID not in processor list") - } - if processorIndex+1 < len(pipeline.Processors) { // Send to next processor. return []component.InstanceID{{Kind: component.KindProcessor, ID: pipeline.Processors[processorIndex+1]}} diff --git a/internal/converter/internal/otelcolconvert/testdata/basicauth.river b/internal/converter/internal/otelcolconvert/testdata/basicauth.river new file mode 100644 index 0000000000..1454a7bc53 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/basicauth.river @@ -0,0 +1,28 @@ +otelcol.auth.basic "default" { + username = "username" + password = "password" +} + +otelcol.auth.basic "default_client" { + username = "username2" + password = "password2" +} + +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + auth = otelcol.auth.basic.default.handler + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/basicauth.yaml b/internal/converter/internal/otelcolconvert/testdata/basicauth.yaml new file mode 100644 index 0000000000..bc585d4f9a --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/basicauth.yaml @@ -0,0 +1,49 @@ +extensions: + basicauth: + client_auth: + username: username + password: password + basicauth/client: + client_auth: + username: username2 + password: password2 + basicauth/server: # this extension is not defined in services and shouldn't be converted + htpasswd: + file: .htpasswd + inline: | + ${BASIC_AUTH_USERNAME}:${BASIC_AUTH_PASSWORD} + +receivers: + otlp: + protocols: + grpc: + http: + +processors: + +exporters: + otlp: + auth: + authenticator: basicauth + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). 
+ endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + extensions: [basicauth, basicauth/client] + pipelines: + metrics: + receivers: [otlp] + processors: [] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [] + exporters: [otlp] diff --git a/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river new file mode 100644 index 0000000000..141cfb7795 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river @@ -0,0 +1,25 @@ +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.processor.batch.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.processor.batch "default" { + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.yaml b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.yaml new file mode 100644 index 0000000000..b53b67761c --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.yaml @@ -0,0 +1,33 @@ +receivers: + otlp: + protocols: + grpc: + http: + +processors: + batch: + +exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + pipelines: + metrics: + receivers: [otlp] + processors: [] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [batch] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [] + exporters: [otlp] + diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river new file mode 100644 index 0000000000..655a859f8e --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river @@ -0,0 +1,27 @@ +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.connector.spanmetrics.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} + +otelcol.connector.spanmetrics "default" { + histogram { + explicit { } + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics.yaml b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.yaml new file mode 100644 index 0000000000..58786875cc --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.yaml @@ -0,0 +1,36 @@ +receivers: + otlp: + protocols: + grpc: + http: + +exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). 
+ endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +processors: + batch: + +connectors: + spanmetrics: + +service: + pipelines: + traces: + receivers: [otlp] + processors: [] + exporters: [spanmetrics] + metrics: + receivers: [spanmetrics] + processors: [] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [] + exporters: [otlp] + diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river new file mode 100644 index 0000000000..07665b91f2 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river @@ -0,0 +1,49 @@ +otelcol.receiver.otlp "default_traces" { + grpc { } + + http { } + + output { + metrics = [otelcol.exporter.otlp.default_metrics_backend.input] + logs = [] + traces = [otelcol.exporter.otlp.default_traces_backend.input, otelcol.connector.spanmetrics.default.input] + } +} + +otelcol.exporter.otlp "default_metrics_backend" { + client { + endpoint = "database:44317" + } +} + +otelcol.exporter.otlp "default_traces_backend" { + client { + endpoint = "database:34317" + } +} + +otelcol.connector.spanmetrics "default" { + histogram { + explicit { } + } + + output { + metrics = [otelcol.exporter.otlp.default_metrics_backend.input] + } +} + +otelcol.exporter.otlp "foo_metrics_backend_two" { + client { + endpoint = "database:54317" + } +} + +otelcol.connector.spanmetrics "foo_default" { + histogram { + explicit { } + } + + output { + metrics = [otelcol.exporter.otlp.foo_metrics_backend_two.input] + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.yaml b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.yaml new file mode 100644 index 0000000000..b2ebe10cfa --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.yaml @@ -0,0 +1,48 @@ +receivers: + otlp/traces: + protocols: + grpc: + http: + +exporters: + otlp/traces_backend: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:34317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + + otlp/metrics_backend: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:44317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + + otlp/metrics_backend_two: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). 
+ endpoint: database:54317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +connectors: + spanmetrics: + histogram: + exponential: + +service: + pipelines: + traces: + receivers: [otlp/traces] + exporters: [otlp/traces_backend, spanmetrics] + metrics: + receivers: [spanmetrics] + exporters: [otlp/metrics_backend] + metrics/foo: + receivers: [spanmetrics] + exporters: [otlp/metrics_backend_two] + diff --git a/internal/component/local/file/detector.go b/internal/filedetector/detector.go similarity index 90% rename from internal/component/local/file/detector.go rename to internal/filedetector/detector.go index 31769c2e21..7379527fc7 100644 --- a/internal/component/local/file/detector.go +++ b/internal/filedetector/detector.go @@ -1,4 +1,4 @@ -package file +package filedetector import ( "context" @@ -64,8 +64,8 @@ func (ut *Detector) UnmarshalText(text []byte) error { return nil } -type fsNotify struct { - opts fsNotifyOptions +type FSNotify struct { + opts FSNotifyOptions cancel context.CancelFunc // watcherMut is needed to prevent race conditions on Windows. This can be @@ -75,7 +75,7 @@ type fsNotify struct { watcher *fsnotify.Watcher } -type fsNotifyOptions struct { +type FSNotifyOptions struct { Logger log.Logger Filename string ReloadFile func() // Callback to request file reload. @@ -84,7 +84,7 @@ type fsNotifyOptions struct { // newFSNotify creates a new fsnotify detector which uses filesystem events to // detect that a file has changed. -func newFSNotify(opts fsNotifyOptions) (*fsNotify, error) { +func NewFSNotify(opts FSNotifyOptions) (*FSNotify, error) { w, err := fsnotify.NewWatcher() if err != nil { return nil, err @@ -98,7 +98,7 @@ func newFSNotify(opts fsNotifyOptions) (*fsNotify, error) { ctx, cancel := context.WithCancel(context.Background()) - wd := &fsNotify{ + wd := &FSNotify{ opts: opts, watcher: w, cancel: cancel, @@ -108,7 +108,7 @@ func newFSNotify(opts fsNotifyOptions) (*fsNotify, error) { return wd, nil } -func (fsn *fsNotify) wait(ctx context.Context) { +func (fsn *FSNotify) wait(ctx context.Context) { pollTick := time.NewTicker(fsn.opts.PollFrequency) defer pollTick.Stop() @@ -150,7 +150,7 @@ func (fsn *fsNotify) wait(ctx context.Context) { } } -func (fsn *fsNotify) Close() error { +func (fsn *FSNotify) Close() error { fsn.watcherMut.Lock() defer fsn.watcherMut.Unlock() @@ -158,22 +158,22 @@ func (fsn *fsNotify) Close() error { return fsn.watcher.Close() } -type poller struct { - opts pollerOptions +type Poller struct { + opts PollerOptions cancel context.CancelFunc } -type pollerOptions struct { +type PollerOptions struct { Filename string ReloadFile func() // Callback to request file reload. PollFrequency time.Duration } // newPoller creates a new poll-based file update detector. -func newPoller(opts pollerOptions) *poller { +func NewPoller(opts PollerOptions) *Poller { ctx, cancel := context.WithCancel(context.Background()) - pw := &poller{ + pw := &Poller{ opts: opts, cancel: cancel, } @@ -182,7 +182,7 @@ func newPoller(opts pollerOptions) *poller { return pw } -func (p *poller) run(ctx context.Context) { +func (p *Poller) run(ctx context.Context) { t := time.NewTicker(p.opts.PollFrequency) defer t.Stop() @@ -200,7 +200,7 @@ func (p *poller) run(ctx context.Context) { } // Close terminates the poller. 
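With the detector promoted to the exported internal/filedetector package above, other packages can construct watchers directly. A hypothetical consumer sketch; the path, poll frequency, and callback are illustrative, and per the diff NewPoller starts its polling loop immediately while Close stops it:

```go
package example

import (
	"time"

	filedetector "github.com/grafana/agent/internal/filedetector"
)

// watchFile polls a file and invokes reload on the polling schedule.
// Callers are expected to Close the returned Poller when done.
func watchFile(reload func()) *filedetector.Poller {
	return filedetector.NewPoller(filedetector.PollerOptions{
		Filename:      "/etc/agent/config.river", // illustrative path
		ReloadFile:    reload,
		PollFrequency: time.Minute,
	})
}
```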
-func (p *poller) Close() error { +func (p *Poller) Close() error { p.cancel() return nil } diff --git a/internal/flow/import_test.go b/internal/flow/import_test.go index e70e498af2..89fddafc46 100644 --- a/internal/flow/import_test.go +++ b/internal/flow/import_test.go @@ -21,8 +21,10 @@ import ( _ "github.com/grafana/agent/internal/flow/internal/testcomponents/module/string" ) -// The tests are using the .txtar files stored in the testdata folder. +// use const to avoid lint error +const mainFile = "main.river" +// The tests are using the .txtar files stored in the testdata folder. type testImportFile struct { description string // description at the top of the txtar file main string // root config that the controller should load @@ -45,7 +47,7 @@ func buildTestImportFile(t *testing.T, filename string) testImportFile { tc.description = string(archive.Comment) for _, riverConfig := range archive.Files { switch riverConfig.Name { - case "main.river": + case mainFile: tc.main = string(riverConfig.Data) case "module.river": tc.module = string(riverConfig.Data) @@ -132,6 +134,90 @@ func TestImportHTTP(t *testing.T) { } } +type testImportFileFolder struct { + description string // description at the top of the txtar file + main string // root config that the controller should load + module1 string // module imported by the root config + module2 string // another module imported by the root config + removed string // module will be removed in the dir on update + added string // module which will be added in the dir on update + update *updateFile // update can be used to update the content of a file at runtime +} + +func buildTestImportFileFolder(t *testing.T, filename string) testImportFileFolder { + archive, err := txtar.ParseFile(filename) + require.NoError(t, err) + var tc testImportFileFolder + tc.description = string(archive.Comment) + for _, riverConfig := range archive.Files { + switch riverConfig.Name { + case mainFile: + tc.main = string(riverConfig.Data) + case "module1.river": + tc.module1 = string(riverConfig.Data) + case "module2.river": + tc.module2 = string(riverConfig.Data) + case "added.river": + tc.added = string(riverConfig.Data) + case "removed.river": + tc.removed = string(riverConfig.Data) + case "update/module1.river": + require.Nil(t, tc.update) + tc.update = &updateFile{ + name: "module1.river", + updateConfig: string(riverConfig.Data), + } + case "update/module2.river": + require.Nil(t, tc.update) + tc.update = &updateFile{ + name: "module2.river", + updateConfig: string(riverConfig.Data), + } + } + } + return tc +} + +func TestImportFileFolder(t *testing.T) { + directory := "./testdata/import_file_folder" + for _, file := range getTestFiles(directory, t) { + tc := buildTestImportFileFolder(t, filepath.Join(directory, file.Name())) + t.Run(tc.description, func(t *testing.T) { + dir := "tmpTest" + require.NoError(t, os.Mkdir(dir, 0700)) + defer os.RemoveAll(dir) + + if tc.module1 != "" { + require.NoError(t, os.WriteFile(filepath.Join(dir, "module1.river"), []byte(tc.module1), 0700)) + } + + if tc.module2 != "" { + require.NoError(t, os.WriteFile(filepath.Join(dir, "module2.river"), []byte(tc.module2), 0700)) + } + + if tc.removed != "" { + require.NoError(t, os.WriteFile(filepath.Join(dir, "removed.river"), []byte(tc.removed), 0700)) + } + + // TODO: ideally we would like to check the health of the node but that's not yet possible for import nodes. 
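The folder-import tests above are driven by txtar archives: the archive comment is the test description, main.river is the root config, and every other named file is written out as a module. A minimal sketch of that convention using the x/tools txtar API; the describe helper is illustrative:

```go
package example

import (
	"fmt"

	"golang.org/x/tools/txtar"
)

func describe(path string) error {
	archive, err := txtar.ParseFile(path)
	if err != nil {
		return err
	}
	fmt.Printf("description: %s", archive.Comment)
	for _, f := range archive.Files {
		// main.river is the root config; the test writes the rest into a
		// temporary import directory.
		fmt.Printf("file %s: %d bytes\n", f.Name, len(f.Data))
	}
	return nil
}
```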
+ // We should expect that adding or removing files in the dir is gracefully handled and the node should be + // healthy once it polls the content of the dir again. + testConfig(t, tc.main, "", func() { + if tc.removed != "" { + os.Remove(filepath.Join(dir, "removed.river")) + } + + if tc.added != "" { + require.NoError(t, os.WriteFile(filepath.Join(dir, "added.river"), []byte(tc.added), 0700)) + } + if tc.update != nil { + require.NoError(t, os.WriteFile(filepath.Join(dir, tc.update.name), []byte(tc.update.updateConfig), 0700)) + } + }) + }) + } +} + type testImportError struct { description string main string @@ -145,7 +231,7 @@ func buildTestImportError(t *testing.T, filename string) testImportError { tc.description = string(archive.Comment) for _, riverConfig := range archive.Files { switch riverConfig.Name { - case "main.river": + case mainFile: tc.main = string(riverConfig.Data) case "error": tc.expectedError = string(riverConfig.Data) diff --git a/internal/flow/internal/controller/block_node.go b/internal/flow/internal/controller/block_node.go index 6d3aca5bbb..bab884f2ab 100644 --- a/internal/flow/internal/controller/block_node.go +++ b/internal/flow/internal/controller/block_node.go @@ -11,14 +11,15 @@ import ( type BlockNode interface { dag.Node - // Block returns the current block of the managed config node. + // Block returns the current block managed by the node. Block() *ast.BlockStmt - // Evaluate updates the arguments for the managed component - // by re-evaluating its River block with the provided scope. The managed component - // will be built the first time Evaluate is called. + // Evaluate updates the arguments by re-evaluating the River block with the provided scope. // // Evaluate will return an error if the River block cannot be evaluated or if // decoding to arguments fails. Evaluate(scope *vm.Scope) error + + // UpdateBlock updates the River block used to construct arguments. + UpdateBlock(b *ast.BlockStmt) } diff --git a/internal/flow/internal/controller/component_node.go b/internal/flow/internal/controller/component_node.go index f3d9cdcaa4..1deb18df35 100644 --- a/internal/flow/internal/controller/component_node.go +++ b/internal/flow/internal/controller/component_node.go @@ -2,7 +2,6 @@ package controller import ( "github.com/grafana/agent/internal/component" - "github.com/grafana/river/ast" ) // ComponentNode is a generic representation of a Flow component. @@ -27,9 +26,6 @@ type ComponentNode interface { // ID returns the component ID of the managed component from its River block. ID() ComponentID - // UpdateBlock updates the River block used to construct arguments for the managed component. - UpdateBlock(b *ast.BlockStmt) - // ModuleIDs returns the current list of modules managed by the component. 
ModuleIDs() []string } diff --git a/internal/flow/internal/controller/loader.go b/internal/flow/internal/controller/loader.go index 74d4b344b2..43e102963e 100644 --- a/internal/flow/internal/controller/loader.go +++ b/internal/flow/internal/controller/loader.go @@ -338,9 +338,15 @@ func (l *Loader) splitComponentBlocks(blocks []*ast.BlockStmt) (componentBlocks, } func (l *Loader) populateDeclareNodes(g *dag.Graph, declareBlocks []*ast.BlockStmt) diag.Diagnostics { - var diags diag.Diagnostics + var ( + diags diag.Diagnostics + node *DeclareNode + blockMap = make(map[string]*ast.BlockStmt, len(declareBlocks)) + ) l.declareNodes = map[string]*DeclareNode{} for _, declareBlock := range declareBlocks { + id := BlockComponentID(declareBlock).String() + if declareBlock.Label == declareType { diags.Add(diag.Diagnostic{ Severity: diag.SeverityLevelError, @@ -350,16 +356,18 @@ func (l *Loader) populateDeclareNodes(g *dag.Graph, declareBlocks []*ast.BlockSt }) continue } - // TODO: if node already exists in the graph, update the block - // instead of copying it. - node := NewDeclareNode(declareBlock) - if g.GetByID(node.NodeID()) != nil { - diags.Add(diag.Diagnostic{ - Severity: diag.SeverityLevelError, - Message: fmt.Sprintf("cannot add declare node %q; node with same ID already exists", node.NodeID()), - }) + + if diag, defined := blockAlreadyDefined(blockMap, id, declareBlock); defined { + diags = append(diags, diag) continue } + + if exist := l.graph.GetByID(id); exist != nil { + node = exist.(*DeclareNode) + node.UpdateBlock(declareBlock) + } else { + node = NewDeclareNode(declareBlock) + } l.componentNodeManager.customComponentReg.registerDeclare(declareBlock) l.declareNodes[node.label] = node g.Add(node) @@ -367,6 +375,21 @@ func (l *Loader) populateDeclareNodes(g *dag.Graph, declareBlocks []*ast.BlockSt return diags } +// blockAlreadyDefined returns (diag, true) if the given id is already in the provided blockMap. +// else it adds the block to the map and returns (empty diag, false). +func blockAlreadyDefined(blockMap map[string]*ast.BlockStmt, id string, block *ast.BlockStmt) (diag.Diagnostic, bool) { + if orig, redefined := blockMap[id]; redefined { + return diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: fmt.Sprintf("block %s already declared at %s", id, ast.StartPos(orig).Position()), + StartPos: block.NamePos.Position(), + EndPos: block.NamePos.Add(len(id) - 1).Position(), + }, true + } + blockMap[id] = block + return diag.Diagnostic{}, false +} + // populateServiceNodes adds service nodes to the graph. func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockStmt) diag.Diagnostics { var diags diag.Diagnostics @@ -441,25 +464,33 @@ func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockSt // populateConfigBlockNodes adds any config blocks to the graph. func (l *Loader) populateConfigBlockNodes(args map[string]any, g *dag.Graph, configBlocks []*ast.BlockStmt) diag.Diagnostics { var ( - diags diag.Diagnostics - nodeMap = NewConfigNodeMap() + diags diag.Diagnostics + nodeMap = NewConfigNodeMap() + blockMap = make(map[string]*ast.BlockStmt, len(configBlocks)) ) for _, block := range configBlocks { - node, newConfigNodeDiags := NewConfigNode(block, l.globals) - diags = append(diags, newConfigNodeDiags...) 
- - if g.GetByID(node.NodeID()) != nil { - configBlockStartPos := ast.StartPos(block).Position() - diags.Add(diag.Diagnostic{ - Severity: diag.SeverityLevelError, - Message: fmt.Sprintf("%q block already declared at %s", node.NodeID(), configBlockStartPos), - StartPos: configBlockStartPos, - EndPos: ast.EndPos(block).Position(), - }) - + var ( + node BlockNode + newConfigNodeDiags diag.Diagnostics + ) + id := BlockComponentID(block).String() + if diag, defined := blockAlreadyDefined(blockMap, id, block); defined { + diags = append(diags, diag) continue } + // Check the graph from the previous call to Load to see if we can copy an + // existing instance of BlockNode. + if exist := l.graph.GetByID(id); exist != nil { + node = exist.(BlockNode) + node.UpdateBlock(block) + } else { + node, newConfigNodeDiags = NewConfigNode(block, l.globals) + diags = append(diags, newConfigNodeDiags...) + if diags.HasErrors() { + continue + } + } nodeMapDiags := nodeMap.Append(node) diags = append(diags, nodeMapDiags...) @@ -502,18 +533,10 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo ) for _, block := range componentBlocks { id := BlockComponentID(block).String() - - if orig, redefined := blockMap[id]; redefined { - diags.Add(diag.Diagnostic{ - Severity: diag.SeverityLevelError, - Message: fmt.Sprintf("Component %s already declared at %s", id, ast.StartPos(orig).Position()), - StartPos: block.NamePos.Position(), - EndPos: block.NamePos.Add(len(id) - 1).Position(), - }) + if diag, defined := blockAlreadyDefined(blockMap, id, block); defined { + diags = append(diags, diag) continue } - blockMap[id] = block - // Check the graph from the previous call to Load to see if we can copy an // existing instance of ComponentNode. if exist := l.graph.GetByID(id); exist != nil { diff --git a/internal/flow/internal/controller/loader_test.go b/internal/flow/internal/controller/loader_test.go index 5105587ba1..672e2dce67 100644 --- a/internal/flow/internal/controller/loader_test.go +++ b/internal/flow/internal/controller/loader_test.go @@ -90,14 +90,30 @@ func TestLoader(t *testing.T) { t.Run("New Graph", func(t *testing.T) { l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(testFile), []byte(testConfig)) + diags := applyFromContent(t, l, []byte(testFile), []byte(testConfig), nil) require.NoError(t, diags.ErrorOrNil()) requireGraph(t, l.Graph(), testGraphDefinition) }) + t.Run("Reload Graph New Config", func(t *testing.T) { + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, []byte(testFile), []byte(testConfig), nil) + require.NoError(t, diags.ErrorOrNil()) + requireGraph(t, l.Graph(), testGraphDefinition) + updatedTestConfig := ` + tracing { + sampling_fraction = 2 + } + ` + diags = applyFromContent(t, l, []byte(testFile), []byte(updatedTestConfig), nil) + require.NoError(t, diags.ErrorOrNil()) + // Expect the same graph because tracing is still there and logging will be added by default.
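+ // The loader keeps the existing node instances and only calls UpdateBlock on them, so the graph shape is unchanged.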
+ requireGraph(t, l.Graph(), testGraphDefinition) + }) + t.Run("New Graph No Config", func(t *testing.T) { l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(testFile), nil) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) require.NoError(t, diags.ErrorOrNil()) requireGraph(t, l.Graph(), testGraphDefinition) }) @@ -115,11 +131,11 @@ func TestLoader(t *testing.T) { } ` l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(startFile), []byte(testConfig)) + diags := applyFromContent(t, l, []byte(startFile), []byte(testConfig), nil) origGraph := l.Graph() require.NoError(t, diags.ErrorOrNil()) - diags = applyFromContent(t, l, []byte(testFile), []byte(testConfig)) + diags = applyFromContent(t, l, []byte(testFile), []byte(testConfig), nil) require.NoError(t, diags.ErrorOrNil()) newGraph := l.Graph() @@ -134,7 +150,7 @@ func TestLoader(t *testing.T) { } ` l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(invalidFile), nil) + diags := applyFromContent(t, l, []byte(invalidFile), nil, nil) require.ErrorContains(t, diags.ErrorOrNil(), `cannot find the definition of component name "doesnotexist`) }) @@ -145,25 +161,25 @@ func TestLoader(t *testing.T) { } ` l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(invalidFile), nil) + diags := applyFromContent(t, l, []byte(invalidFile), nil, nil) require.ErrorContains(t, diags.ErrorOrNil(), `component "testcomponents.tick" must have a label`) }) t.Run("Load with correct stability level", func(t *testing.T) { l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityBeta)) - diags := applyFromContent(t, l, []byte(testFile), nil) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) require.NoError(t, diags.ErrorOrNil()) }) t.Run("Load with below minimum stability level", func(t *testing.T) { l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityStable)) - diags := applyFromContent(t, l, []byte(testFile), nil) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) require.ErrorContains(t, diags.ErrorOrNil(), "component \"testcomponents.tick\" is at stability level \"beta\", which is below the minimum allowed stability level \"stable\"") }) t.Run("Load with undefined minimum stability level", func(t *testing.T) { l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityUndefined)) - diags := applyFromContent(t, l, []byte(testFile), nil) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) require.ErrorContains(t, diags.ErrorOrNil(), "stability levels must be defined: got \"beta\" as stability of component \"testcomponents.tick\" and as the minimum stability level") }) @@ -182,7 +198,7 @@ func TestLoader(t *testing.T) { } ` l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(invalidFile), nil) + diags := applyFromContent(t, l, []byte(invalidFile), nil, nil) require.Error(t, diags.ErrorOrNil()) requireGraph(t, l.Graph(), graphDefinition{ @@ -210,9 +226,94 @@ func TestLoader(t *testing.T) { } ` l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(invalidFile), nil) + diags := applyFromContent(t, l, []byte(invalidFile), nil, nil) require.Error(t, diags.ErrorOrNil()) }) + + t.Run("Config block redefined", func(t *testing.T) { + invalidFile := ` + logging {} + logging {} + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, nil, 
[]byte(invalidFile), nil) + require.ErrorContains(t, diags.ErrorOrNil(), `block logging already declared at TestLoader/Config_block_redefined:2:4`) + }) + + t.Run("Config block redefined after reload", func(t *testing.T) { + file := ` + logging {} + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, nil, []byte(file), nil) + require.NoError(t, diags.ErrorOrNil()) + invalidFile := ` + logging {} + logging {} + ` + diags = applyFromContent(t, l, nil, []byte(invalidFile), nil) + require.ErrorContains(t, diags.ErrorOrNil(), `block logging already declared at TestLoader/Config_block_redefined_after_reload:2:4`) + }) + + t.Run("Component block redefined", func(t *testing.T) { + invalidFile := ` + testcomponents.tick "ticker" { + frequency = "1s" + } + testcomponents.tick "ticker" { + frequency = "1s" + } + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, []byte(invalidFile), nil, nil) + require.ErrorContains(t, diags.ErrorOrNil(), `block testcomponents.tick.ticker already declared at TestLoader/Component_block_redefined:2:4`) + }) + + t.Run("Component block redefined after reload", func(t *testing.T) { + file := ` + testcomponents.tick "ticker" { + frequency = "1s" + } + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, []byte(file), nil, nil) + require.NoError(t, diags.ErrorOrNil()) + invalidFile := ` + testcomponents.tick "ticker" { + frequency = "1s" + } + testcomponents.tick "ticker" { + frequency = "1s" + } + ` + diags = applyFromContent(t, l, []byte(invalidFile), nil, nil) + require.ErrorContains(t, diags.ErrorOrNil(), `block testcomponents.tick.ticker already declared at TestLoader/Component_block_redefined_after_reload:2:4`) + }) + + t.Run("Declare block redefined", func(t *testing.T) { + invalidFile := ` + declare "a" {} + declare "a" {} + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, nil, nil, []byte(invalidFile)) + require.ErrorContains(t, diags.ErrorOrNil(), `block declare.a already declared at TestLoader/Declare_block_redefined:2:4`) + }) + + t.Run("Declare block redefined after reload", func(t *testing.T) { + file := ` + declare "a" {} + ` + l := controller.NewLoader(newLoaderOptions()) + diags := applyFromContent(t, l, nil, nil, []byte(file)) + require.NoError(t, diags.ErrorOrNil()) + invalidFile := ` + declare "a" {} + declare "a" {} + ` + diags = applyFromContent(t, l, nil, nil, []byte(invalidFile)) + require.ErrorContains(t, diags.ErrorOrNil(), `block declare.a already declared at TestLoader/Declare_block_redefined_after_reload:2:4`) + }) } // TestScopeWithFailingComponent is used to ensure that the scope is filled out, even if the component @@ -253,13 +354,13 @@ func TestScopeWithFailingComponent(t *testing.T) { } l := controller.NewLoader(newLoaderOptions()) - diags := applyFromContent(t, l, []byte(testFile), nil) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) require.Error(t, diags.ErrorOrNil()) require.Len(t, diags, 1) require.True(t, strings.Contains(diags.Error(), `unrecognized attribute name "frequenc"`)) } -func applyFromContent(t *testing.T, l *controller.Loader, componentBytes []byte, configBytes []byte) diag.Diagnostics { +func applyFromContent(t *testing.T, l *controller.Loader, componentBytes []byte, configBytes []byte, declareBytes []byte) diag.Diagnostics { t.Helper() var ( @@ -281,6 +382,13 @@ func applyFromContent(t *testing.T, l *controller.Loader, componentBytes []byte, } } + if string(declareBytes) != "" { 
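+ // Declare blocks are optional in these tests; only parse them when content is provided.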
+ declareBlocks, diags = fileToBlock(t, declareBytes) + if diags.HasErrors() { + return diags + } + } + applyOptions := controller.ApplyOptions{ ComponentBlocks: componentBlocks, ConfigBlocks: configBlocks, diff --git a/internal/flow/internal/controller/metrics.go b/internal/flow/internal/controller/metrics.go index bccc814408..e60d8a51e3 100644 --- a/internal/flow/internal/controller/metrics.go +++ b/internal/flow/internal/controller/metrics.go @@ -36,18 +36,24 @@ func newControllerMetrics(id string) *controllerMetrics { cm.componentEvaluationTime = prometheus.NewHistogram( prometheus.HistogramOpts{ - Name: "agent_component_evaluation_seconds", - Help: "Time spent performing component evaluation", - ConstLabels: map[string]string{"controller_id": id}, - Buckets: evaluationTimesBuckets, + Name: "agent_component_evaluation_seconds", + Help: "Time spent performing component evaluation", + ConstLabels: map[string]string{"controller_id": id}, + Buckets: evaluationTimesBuckets, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, }, ) cm.dependenciesWaitTime = prometheus.NewHistogram( prometheus.HistogramOpts{ - Name: "agent_component_dependencies_wait_seconds", - Help: "Time spent by components waiting to be evaluated after their dependency is updated.", - ConstLabels: map[string]string{"controller_id": id}, - Buckets: evaluationTimesBuckets, + Name: "agent_component_dependencies_wait_seconds", + Help: "Time spent by components waiting to be evaluated after their dependency is updated.", + ConstLabels: map[string]string{"controller_id": id}, + Buckets: evaluationTimesBuckets, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, }, ) diff --git a/internal/flow/internal/controller/node_config_argument.go b/internal/flow/internal/controller/node_config_argument.go index 233b4580b4..56f8537d5a 100644 --- a/internal/flow/internal/controller/node_config_argument.go +++ b/internal/flow/internal/controller/node_config_argument.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "strings" "sync" "github.com/grafana/river/ast" @@ -85,3 +86,19 @@ func (cn *ArgumentConfigNode) Block() *ast.BlockStmt { // NodeID implements dag.Node and returns the unique ID for the config node. func (cn *ArgumentConfigNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the River block used to construct arguments. +// The new block isn't used until the next time Evaluate is invoked. +// +// UpdateBlock will panic if the block does not match the component ID of the +// ArgumentConfigNode. +func (cn *ArgumentConfigNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b + cn.eval = vm.New(b.Body) +} diff --git a/internal/flow/internal/controller/node_config_export.go b/internal/flow/internal/controller/node_config_export.go index 891f044d4a..dc05493113 100644 --- a/internal/flow/internal/controller/node_config_export.go +++ b/internal/flow/internal/controller/node_config_export.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "strings" "sync" "github.com/grafana/river/ast" @@ -74,3 +75,19 @@ func (cn *ExportConfigNode) Block() *ast.BlockStmt { // NodeID implements dag.Node and returns the unique ID for the config node.
func (cn *ExportConfigNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the River block used to construct arguments. +// The new block isn't used until the next time Evaluate is invoked. +// +// UpdateBlock will panic if the block does not match the component ID of the +// ExportConfigNode. +func (cn *ExportConfigNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b + cn.eval = vm.New(b.Body) +} diff --git a/internal/flow/internal/controller/node_config_import.go b/internal/flow/internal/controller/node_config_import.go index fdaf4e3b37..7cc95bdb4d 100644 --- a/internal/flow/internal/controller/node_config_import.go +++ b/internal/flow/internal/controller/node_config_import.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "hash/fnv" + "maps" "path" "path/filepath" "strings" @@ -46,7 +47,7 @@ type ImportConfigNode struct { importChildrenUpdateChan chan struct{} // used to trigger an update of the running children mut sync.RWMutex - importedContent string + importedContent map[string]string importConfigNodesChildren map[string]*ImportConfigNode importChildrenRunning bool importedDeclares map[string]ast.Body @@ -185,7 +186,7 @@ func (cn *ImportConfigNode) Evaluate(scope *vm.Scope) error { } // onContentUpdate is triggered every time the managed import source has new content. -func (cn *ImportConfigNode) onContentUpdate(importedContent string) { +func (cn *ImportConfigNode) onContentUpdate(importedContent map[string]string) { cn.mut.Lock() defer cn.mut.Unlock() @@ -193,31 +194,36 @@ func (cn *ImportConfigNode) onContentUpdate(importedContent string) { defer cn.inContentUpdate.Store(false) // If the source sent the same content, there is no need to reload.
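+ // Imported content is now a map of file name to file content, so compare the whole maps.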
- if cn.importedContent == importedContent { + if maps.Equal(cn.importedContent, importedContent) { return } - cn.importedContent = importedContent + cn.importedContent = make(map[string]string) + for k, v := range importedContent { + cn.importedContent[k] = v + } cn.importedDeclares = make(map[string]ast.Body) cn.importConfigNodesChildren = make(map[string]*ImportConfigNode) - parsedImportedContent, err := parser.ParseFile(cn.label, []byte(importedContent)) - if err != nil { - level.Error(cn.logger).Log("msg", "failed to parse file on update", "err", err) - cn.setContentHealth(component.HealthTypeUnhealthy, fmt.Sprintf("imported content cannot be parsed: %s", err)) - return - } + for f, ic := range importedContent { + parsedImportedContent, err := parser.ParseFile(cn.label, []byte(ic)) + if err != nil { + level.Error(cn.logger).Log("msg", "failed to parse file on update", "file", f, "err", err) + cn.setContentHealth(component.HealthTypeUnhealthy, fmt.Sprintf("imported content from %q cannot be parsed: %s", f, err)) + return + } - // populate importedDeclares and importConfigNodesChildren - err = cn.processImportedContent(parsedImportedContent) - if err != nil { - level.Error(cn.logger).Log("msg", "failed to process imported content", "err", err) - cn.setContentHealth(component.HealthTypeUnhealthy, fmt.Sprintf("imported content is invalid: %s", err)) - return + // populate importedDeclares and importConfigNodesChildren + err = cn.processImportedContent(parsedImportedContent) + if err != nil { + level.Error(cn.logger).Log("msg", "failed to process imported content", "file", f, "err", err) + cn.setContentHealth(component.HealthTypeUnhealthy, fmt.Sprintf("imported content from %q is invalid: %s", f, err)) + return + } } // evaluate the importConfigNodesChildren that have been created - err = cn.evaluateChildren() + err := cn.evaluateChildren() if err != nil { level.Error(cn.logger).Log("msg", "failed to evaluate nested import", "err", err) cn.setContentHealth(component.HealthTypeUnhealthy, fmt.Sprintf("nested import block failed to evaluate: %s", err)) @@ -385,6 +391,22 @@ func (cn *ImportConfigNode) run(errChan chan error, updateTasks func() error) er } } +// UpdateBlock updates the River block used to construct arguments. +// The new block isn't used until the next time Evaluate is invoked. +// +// UpdateBlock will panic if the block does not match the component ID of the +// ImportConfigNode. +func (cn *ImportConfigNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b + cn.source.SetEval(vm.New(b.Body)) +} + func (cn *ImportConfigNode) Label() string { return cn.label } // Block implements BlockNode and returns the current block of the managed config node. diff --git a/internal/flow/internal/controller/node_config_logging.go b/internal/flow/internal/controller/node_config_logging.go index edee9f0dca..ac95eae5c1 100644 --- a/internal/flow/internal/controller/node_config_logging.go +++ b/internal/flow/internal/controller/node_config_logging.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "strings" "sync" "github.com/go-kit/log" @@ -80,3 +81,19 @@ func (cn *LoggingConfigNode) Block() *ast.BlockStmt { // NodeID implements dag.Node and returns the unique ID for the config node.
func (cn *LoggingConfigNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the River block used to construct arguments. +// The new block isn't used until the next time Evaluate is invoked. +// +// UpdateBlock will panic if the block does not match the component ID of the +// LoggingConfigNode. +func (cn *LoggingConfigNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b + cn.eval = vm.New(b.Body) +} diff --git a/internal/flow/internal/controller/node_config_tracing.go b/internal/flow/internal/controller/node_config_tracing.go index 9c1c35db77..525e5c336f 100644 --- a/internal/flow/internal/controller/node_config_tracing.go +++ b/internal/flow/internal/controller/node_config_tracing.go @@ -2,6 +2,7 @@ package controller import ( "fmt" + "strings" "sync" "github.com/grafana/agent/internal/flow/tracing" @@ -84,3 +85,19 @@ func (cn *TracingConfigNode) Block() *ast.BlockStmt { // NodeID implements dag.Node and returns the unique ID for the config node. func (cn *TracingConfigNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the River block used to construct arguments. +// The new block isn't used until the next time Evaluate is invoked. +// +// UpdateBlock will panic if the block does not match the component ID of the +// TracingConfigNode. +func (cn *TracingConfigNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b + cn.eval = vm.New(b.Body) +} diff --git a/internal/flow/internal/controller/node_declare.go b/internal/flow/internal/controller/node_declare.go index b67350eb55..0dec602839 100644 --- a/internal/flow/internal/controller/node_declare.go +++ b/internal/flow/internal/controller/node_declare.go @@ -1,6 +1,9 @@ package controller import ( + "strings" + "sync" + "github.com/grafana/river/ast" "github.com/grafana/river/vm" ) @@ -10,6 +13,7 @@ type DeclareNode struct { label string nodeID string componentName string + mut sync.RWMutex block *ast.BlockStmt } @@ -42,3 +46,17 @@ func (cn *DeclareNode) Block() *ast.BlockStmt { // NodeID implements dag.Node and returns the unique ID for the config node. func (cn *DeclareNode) NodeID() string { return cn.nodeID } + +// UpdateBlock updates the managed River block. +// +// UpdateBlock will panic if the block does not match the component ID of the +// DeclareNode.
+func (cn *DeclareNode) UpdateBlock(b *ast.BlockStmt) { + if !BlockComponentID(b).Equals(strings.Split(cn.nodeID, ".")) { + panic("UpdateBlock called with a River block with a different ID") + } + + cn.mut.Lock() + defer cn.mut.Unlock() + cn.block = b +} diff --git a/internal/flow/internal/controller/scheduler_test.go b/internal/flow/internal/controller/scheduler_test.go index 9f9b3f136a..ad5ebc23ca 100644 --- a/internal/flow/internal/controller/scheduler_test.go +++ b/internal/flow/internal/controller/scheduler_test.go @@ -99,6 +99,7 @@ func (fr fakeRunnable) NodeID() string { return fr.ID } func (fr fakeRunnable) Run(ctx context.Context) error { return fr.Component.Run(ctx) } func (fr fakeRunnable) Block() *ast.BlockStmt { return nil } func (fr fakeRunnable) Evaluate(scope *vm.Scope) error { return nil } +func (fr fakeRunnable) UpdateBlock(b *ast.BlockStmt) {} type mockComponent struct { RunFunc func(ctx context.Context) error diff --git a/internal/flow/internal/importsource/import_file.go b/internal/flow/internal/importsource/import_file.go index 9d919269f6..e0ffa09579 100644 --- a/internal/flow/internal/importsource/import_file.go +++ b/internal/flow/internal/importsource/import_file.go @@ -3,96 +3,254 @@ package importsource import ( "context" "fmt" + "io" + "io/fs" + "os" + "path/filepath" "reflect" + "strings" + "sync" "time" + "github.com/go-kit/log" "github.com/grafana/agent/internal/component" - "github.com/grafana/agent/internal/component/local/file" + filedetector "github.com/grafana/agent/internal/filedetector" + "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/river/vm" ) -// ImportFile imports a module from a file via the local.file component. +// ImportFile imports a module from a file or a folder. type ImportFile struct { - fileComponent *file.Component - arguments component.Arguments - managedOpts component.Options - eval *vm.Evaluator + managedOpts component.Options + eval *vm.Evaluator + onContentChange func(map[string]string) + logger log.Logger + + reloadCh chan struct{} + args FileArguments + + mut sync.RWMutex + detector io.Closer + + healthMut sync.RWMutex + health component.Health } +// waitReadPeriod holds the time to wait before reading a file while the +// source is running. +// +// This prevents updating too frequently and exporting partial writes. +const waitReadPeriod time.Duration = 30 * time.Millisecond + var _ ImportSource = (*ImportFile)(nil) -func NewImportFile(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(string)) *ImportFile { +func NewImportFile(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(map[string]string)) *ImportFile { opts := managedOpts - opts.OnStateChange = func(e component.Exports) { - onContentChange(e.(file.Exports).Content.Value) - } return &ImportFile{ - managedOpts: opts, - eval: eval, + reloadCh: make(chan struct{}, 1), + managedOpts: opts, + eval: eval, + onContentChange: onContentChange, + logger: managedOpts.Logger, } } -// FileArguments holds values which are used to configure the local.file component. type FileArguments struct { // Filename indicates the file to watch. Filename string `river:"filename,attr"` // Type indicates how to detect changes to the file. - Type file.Detector `river:"detector,attr,optional"` + Type filedetector.Detector `river:"detector,attr,optional"` // PollFrequency determines the frequency to check for changes when Type is Poll.
PollFrequency time.Duration `river:"poll_frequency,attr,optional"` } var DefaultFileArguments = FileArguments{ - Type: file.DetectorFSNotify, + Type: filedetector.DetectorFSNotify, PollFrequency: time.Minute, } -type importFileConfigBlock struct { - LocalFileArguments FileArguments `river:",squash"` -} - // SetToDefault implements river.Defaulter. -func (a *importFileConfigBlock) SetToDefault() { - a.LocalFileArguments = DefaultFileArguments +func (a *FileArguments) SetToDefault() { + *a = DefaultFileArguments } func (im *ImportFile) Evaluate(scope *vm.Scope) error { - var arguments importFileConfigBlock + im.mut.Lock() + defer im.mut.Unlock() + + var arguments FileArguments if err := im.eval.Evaluate(scope, &arguments); err != nil { return fmt.Errorf("decoding configuration: %w", err) } - if im.fileComponent == nil { - var err error - im.fileComponent, err = file.New(im.managedOpts, file.Arguments{ - Filename: arguments.LocalFileArguments.Filename, - Type: arguments.LocalFileArguments.Type, - PollFrequency: arguments.LocalFileArguments.PollFrequency, - // isSecret is only used for exported values; modules are not exported - IsSecret: false, - }) - if err != nil { - return fmt.Errorf("creating file component: %w", err) + + if reflect.DeepEqual(im.args, arguments) { + return nil + } + im.args = arguments + + // Force an immediate read of the file to report any potential errors early. + if err := im.readFile(); err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + reloadFile := func() { + select { + case im.reloadCh <- struct{}{}: + default: + // no-op: a reload is already queued so we don't need to queue a second + // one. } - im.arguments = arguments } - if reflect.DeepEqual(im.arguments, arguments) { - return nil + if im.detector != nil { + if err := im.detector.Close(); err != nil { + level.Error(im.managedOpts.Logger).Log("msg", "failed to shut down detector during eval", "err", err) + // We don't return the error here: at worst the old detector leaks, which shouldn't fail the evaluation. + } } - // Update the existing managed component - if err := im.fileComponent.Update(arguments); err != nil { - return fmt.Errorf("updating component: %w", err) + var err error + switch im.args.Type { + case filedetector.DetectorPoll: + im.detector = filedetector.NewPoller(filedetector.PollerOptions{ + Filename: im.args.Filename, + ReloadFile: reloadFile, + PollFrequency: im.args.PollFrequency, + }) + case filedetector.DetectorFSNotify: + im.detector, err = filedetector.NewFSNotify(filedetector.FSNotifyOptions{ + Logger: im.managedOpts.Logger, + Filename: im.args.Filename, + ReloadFile: reloadFile, + PollFrequency: im.args.PollFrequency, + }) } - im.arguments = arguments - return nil + + return err } func (im *ImportFile) Run(ctx context.Context) error { - return im.fileComponent.Run(ctx) + defer func() { + im.mut.Lock() + defer im.mut.Unlock() + if err := im.detector.Close(); err != nil { + level.Error(im.managedOpts.Logger).Log("msg", "failed to shut down detector", "err", err) + } + im.detector = nil + }() + for { + select { + case <-ctx.Done(): + return nil + case <-im.reloadCh: + time.Sleep(waitReadPeriod) + + // We ignore the error from readFile here since readFile already logs it + // and updates the health of the source.
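+ // A failed read keeps the previous imported content; the next change notification triggers another read.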
+ _ = im.readFile() + } + } +} + +func (im *ImportFile) readFile() error { + files, dir, err := im.collectFiles() + if err != nil { + im.setHealth(component.Health{ + Health: component.HealthTypeUnhealthy, + Message: fmt.Sprintf("failed to collect files: %s", err), + UpdateTime: time.Now(), + }) + level.Error(im.managedOpts.Logger).Log("msg", "failed to collect files", "err", err) + return err + } + fileContents := make(map[string]string) + for _, f := range files { + fpath := f + if dir { + fpath = filepath.Join(im.args.Filename, fpath) + } + bb, err := os.ReadFile(fpath) + if err != nil { + im.setHealth(component.Health{ + Health: component.HealthTypeUnhealthy, + Message: fmt.Sprintf("failed to read file: %s", err), + UpdateTime: time.Now(), + }) + level.Error(im.managedOpts.Logger).Log("msg", "failed to read file", "file", fpath, "err", err) + return err + } + fileContents[f] = string(bb) + } + + im.setHealth(component.Health{ + Health: component.HealthTypeHealthy, + Message: "read file", + UpdateTime: time.Now(), + }) + im.onContentChange(fileContents) + return nil } -// CurrentHealth returns the health of the file component. func (im *ImportFile) CurrentHealth() component.Health { - return im.fileComponent.CurrentHealth() + im.healthMut.RLock() + defer im.healthMut.RUnlock() + return im.health +} + +func (im *ImportFile) setHealth(h component.Health) { + im.healthMut.Lock() + defer im.healthMut.Unlock() + im.health = h +} + +func (im *ImportFile) collectFiles() (content []string, dir bool, err error) { + fpath := im.args.Filename + fi, err := os.Stat(fpath) + if err != nil { + return nil, false, err + } + + files := make([]string, 0) + dir = fi.IsDir() + if dir { + files, err = collectFilesFromDir(fpath) + if err != nil { + return nil, true, err + } + } else { + files = append(files, fpath) + } + return files, dir, nil +} + +func collectFilesFromDir(path string) ([]string, error) { + files := make([]string, 0) + err := filepath.WalkDir(path, func(curPath string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + // skip all directories and don't recurse into child dirs that aren't at top-level + if d.IsDir() { + if curPath != path { + return filepath.SkipDir + } + return nil + } + // ignore files not ending in .river extension + if !strings.HasSuffix(curPath, ".river") { + return nil + } + + files = append(files, d.Name()) + return err + }) + if err != nil { + return nil, err + } + return files, nil +} + +// Update the evaluator. 
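+// SetEval is called on reload so that the next Evaluate uses the updated block body.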
+func (im *ImportFile) SetEval(eval *vm.Evaluator) { + im.eval = eval } diff --git a/internal/flow/internal/importsource/import_git.go b/internal/flow/internal/importsource/import_git.go index 2624b60f7b..e69a0da9e6 100644 --- a/internal/flow/internal/importsource/import_git.go +++ b/internal/flow/internal/importsource/import_git.go @@ -6,6 +6,7 @@ import ( "fmt" "path/filepath" "reflect" + "strings" "sync" "time" @@ -27,9 +28,7 @@ type ImportGit struct { repo *vcs.GitRepo repoOpts vcs.GitRepoOptions args GitArguments - onContentChange func(string) - - lastContent string + onContentChange func(map[string]string) argsChanged chan struct{} @@ -61,7 +60,7 @@ func (args *GitArguments) SetToDefault() { *args = DefaultGitArguments } -func NewImportGit(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(string)) *ImportGit { +func NewImportGit(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(map[string]string)) *ImportGit { return &ImportGit{ opts: managedOpts, log: managedOpts.Logger, @@ -235,16 +234,45 @@ func (im *ImportGit) pollFile(ctx context.Context, args GitArguments) error { return err } - // Finally, configure our controller. - bb, err := im.repo.ReadFile(args.Path) + info, err := im.repo.Stat(args.Path) if err != nil { return err } - content := string(bb) - if im.lastContent != content { - im.onContentChange(content) - im.lastContent = content + + if info.IsDir() { + return im.handleDirectory(args.Path) + } + + return im.handleFile(args.Path) +} + +func (im *ImportGit) handleDirectory(path string) error { + filesInfo, err := im.repo.ReadDir(path) + if err != nil { + return err + } + + content := make(map[string]string) + for _, fi := range filesInfo { + if fi.IsDir() || !strings.HasSuffix(fi.Name(), ".river") { + continue + } + bb, err := im.repo.ReadFile(filepath.Join(path, fi.Name())) + if err != nil { + return err + } + content[fi.Name()] = string(bb) + } + im.onContentChange(content) + return nil +} + +func (im *ImportGit) handleFile(path string) error { + bb, err := im.repo.ReadFile(path) + if err != nil { + return err } + im.onContentChange(map[string]string{path: string(bb)}) return nil } @@ -254,3 +282,8 @@ func (im *ImportGit) CurrentHealth() component.Health { defer im.healthMut.RUnlock() return im.health } + +// Update the evaluator. +func (im *ImportGit) SetEval(eval *vm.Evaluator) { + im.eval = eval +} diff --git a/internal/flow/internal/importsource/import_http.go b/internal/flow/internal/importsource/import_http.go index e7cfd90634..a19b4e82cb 100644 --- a/internal/flow/internal/importsource/import_http.go +++ b/internal/flow/internal/importsource/import_http.go @@ -23,10 +23,10 @@ type ImportHTTP struct { var _ ImportSource = (*ImportHTTP)(nil) -func NewImportHTTP(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(string)) *ImportHTTP { +func NewImportHTTP(managedOpts component.Options, eval *vm.Evaluator, onContentChange func(map[string]string)) *ImportHTTP { opts := managedOpts opts.OnStateChange = func(e component.Exports) { - onContentChange(e.(remote_http.Exports).Content.Value) + onContentChange(map[string]string{opts.ID: e.(remote_http.Exports).Content.Value}) } return &ImportHTTP{ managedOpts: opts, @@ -101,3 +101,8 @@ func (im *ImportHTTP) Run(ctx context.Context) error { func (im *ImportHTTP) CurrentHealth() component.Health { return im.managedRemoteHTTP.CurrentHealth() } + +// Update the evaluator. 
+func (im *ImportHTTP) SetEval(eval *vm.Evaluator) { + im.eval = eval +} diff --git a/internal/flow/internal/importsource/import_source.go b/internal/flow/internal/importsource/import_source.go index 9f45697713..e25dfa175c 100644 --- a/internal/flow/internal/importsource/import_source.go +++ b/internal/flow/internal/importsource/import_source.go @@ -32,11 +32,13 @@ type ImportSource interface { Run(ctx context.Context) error // CurrentHealth returns the current Health status of the running source. CurrentHealth() component.Health + // SetEval updates the evaluator used by the source. + SetEval(eval *vm.Evaluator) } // NewImportSource creates a new ImportSource depending on the type. // onContentChange is used by the source when it receives new content. -func NewImportSource(sourceType SourceType, managedOpts component.Options, eval *vm.Evaluator, onContentChange func(string)) ImportSource { +func NewImportSource(sourceType SourceType, managedOpts component.Options, eval *vm.Evaluator, onContentChange func(map[string]string)) ImportSource { switch sourceType { case File: return NewImportFile(managedOpts, eval, onContentChange) diff --git a/internal/flow/internal/importsource/import_string.go b/internal/flow/internal/importsource/import_string.go index b490137cf5..61d742d5d4 100644 --- a/internal/flow/internal/importsource/import_string.go +++ b/internal/flow/internal/importsource/import_string.go @@ -14,12 +14,12 @@ import ( type ImportString struct { arguments component.Arguments eval *vm.Evaluator - onContentChange func(string) + onContentChange func(map[string]string) } var _ ImportSource = (*ImportString)(nil) -func NewImportString(eval *vm.Evaluator, onContentChange func(string)) *ImportString { +func NewImportString(eval *vm.Evaluator, onContentChange func(map[string]string)) *ImportString { return &ImportString{ eval: eval, onContentChange: onContentChange, @@ -42,7 +42,7 @@ func (im *ImportString) Evaluate(scope *vm.Scope) error { im.arguments = arguments // notifies that the content has changed - im.onContentChange(arguments.Content.Value) + im.onContentChange(map[string]string{"import_string": arguments.Content.Value}) return nil } @@ -58,3 +58,8 @@ func (im *ImportString) CurrentHealth() component.Health { Health: component.HealthTypeHealthy, } } + +// Update the evaluator. +func (im *ImportString) SetEval(eval *vm.Evaluator) { + im.eval = eval +} diff --git a/internal/flow/module_eval_test.go b/internal/flow/module_eval_test.go index 4447fa530f..7e4c632e1a 100644 --- a/internal/flow/module_eval_test.go +++ b/internal/flow/module_eval_test.go @@ -210,6 +210,111 @@ func TestUpdates_TwoModules_SameCompNames(t *testing.T) { }, 3*time.Second, 10*time.Millisecond) } +func TestUpdates_ReloadConfig(t *testing.T) { + // We use this module in a Flow config below. + module := ` + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +` + + // We pass the count increments through the module to the summation component and verify that the updates propagate.
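+ // After the reload further below, the module exports a constant (-10), so the summation should eventually report LastAdded == -10.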
+ config := ` + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + module.string "test" { + content = ` + strconv.Quote(module) + ` + arguments { + input = testcomponents.count.inc.count + } + } + + testcomponents.summation "sum" { + input = module.string.test.exports.output + } +` + + ctrl := flow.New(testOptions(t)) + f, err := flow.ParseSource(t.Name(), []byte(config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + ctrl.Run(ctx) + close(done) + }() + defer func() { + cancel() + <-done + }() + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum") + return export.LastAdded == 10 + }, 3*time.Second, 10*time.Millisecond) + + // Reload with a new export. + module = ` + argument "input" { + optional = false + } + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = -10 + } +` + config = ` + testcomponents.count "inc" { + frequency = "10ms" + max = 10 + } + + module.string "test" { + content = ` + strconv.Quote(module) + ` + arguments { + input = testcomponents.count.inc.count + } + } + + testcomponents.summation "sum" { + input = module.string.test.exports.output + } +` + f, err = flow.ParseSource(t.Name(), []byte(config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum") + return export.LastAdded == -10 + }, 3*time.Second, 10*time.Millisecond) +} + func testOptions(t *testing.T) flow.Options { t.Helper() s, err := logging.New(os.Stderr, logging.DefaultOptions) diff --git a/internal/flow/module_test.go b/internal/flow/module_test.go index 79661491c4..663d058757 100644 --- a/internal/flow/module_test.go +++ b/internal/flow/module_test.go @@ -103,13 +103,13 @@ func TestModule(t *testing.T) { name: "Duplicate argument config", argumentModuleContent: argumentConfig + argumentConfig, exportModuleContent: exportStringConfig, - expectedErrorContains: "\"argument.username\" block already declared", + expectedErrorContains: "block argument.username already declared at t1:2:2", }, { name: "Duplicate export config", argumentModuleContent: argumentConfig, exportModuleContent: exportStringConfig + exportStringConfig, - expectedErrorContains: "\"export.username\" block already declared", + expectedErrorContains: "block export.username already declared at t1:7:2", }, { name: "Multiple exports but none are used but still exported", diff --git a/internal/flow/testdata/import_file_folder/import_file_folder_1.txtar b/internal/flow/testdata/import_file_folder/import_file_folder_1.txtar new file mode 100644 index 0000000000..f5b57c027e --- /dev/null +++ b/internal/flow/testdata/import_file_folder/import_file_folder_1.txtar @@ -0,0 +1,55 @@ +Import folder with passthrough modules. 
+ +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.file "testImport" { + filename = "tmpTest" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} + +-- module1.river -- +declare "a" { + argument "input" {} + + b "cc" { + input = argument.input.value + } + + export "output" { + value = b.cc.output + } +} + +-- module2.river -- +declare "b" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- update/module2.river -- +declare "b" { + argument "input" {} + + export "output" { + value = -argument.input.value + } +} \ No newline at end of file diff --git a/internal/flow/testdata/import_file_folder/import_file_folder_2.txtar b/internal/flow/testdata/import_file_folder/import_file_folder_2.txtar new file mode 100644 index 0000000000..ecd5c44a09 --- /dev/null +++ b/internal/flow/testdata/import_file_folder/import_file_folder_2.txtar @@ -0,0 +1,56 @@ +Import folder with passthrough and unused module to be removed. + +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.file "testImport" { + filename = "tmpTest" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} + +-- module1.river -- +declare "a" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- removed.river -- +declare "b" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- update/module1.river -- +declare "a" { + argument "input" {} + + export "output" { + value = -argument.input.value + } +} \ No newline at end of file diff --git a/internal/flow/testdata/import_file_folder/import_file_folder_3.txtar b/internal/flow/testdata/import_file_folder/import_file_folder_3.txtar new file mode 100644 index 0000000000..7b48f25f7e --- /dev/null +++ b/internal/flow/testdata/import_file_folder/import_file_folder_3.txtar @@ -0,0 +1,56 @@ +Import folder with passthrough and unused module to be added. 
+ +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.file "testImport" { + filename = "tmpTest" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} + +-- module1.river -- +declare "a" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- added.river -- +declare "b" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- update/module1.river -- +declare "a" { + argument "input" {} + + export "output" { + value = -argument.input.value + } +} \ No newline at end of file diff --git a/internal/flow/testdata/import_file_folder/import_file_folder_4.txtar b/internal/flow/testdata/import_file_folder/import_file_folder_4.txtar new file mode 100644 index 0000000000..4cd36187d5 --- /dev/null +++ b/internal/flow/testdata/import_file_folder/import_file_folder_4.txtar @@ -0,0 +1,42 @@ +Import folder with passthrough, on update replace the file by another one. + +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.file "testImport" { + filename = "tmpTest" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} + +-- removed.river -- +declare "a" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- added.river -- +declare "a" { + argument "input" {} + + export "output" { + value = -argument.input.value + } +} \ No newline at end of file diff --git a/internal/flow/testdata/import_file_folder/import_file_folder_5.txtar b/internal/flow/testdata/import_file_folder/import_file_folder_5.txtar new file mode 100644 index 0000000000..57e551e004 --- /dev/null +++ b/internal/flow/testdata/import_file_folder/import_file_folder_5.txtar @@ -0,0 +1,44 @@ +Import folder via poll detector with passthrough, on update replace the file by another one. + +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.file "testImport" { + filename = "tmpTest" + detector = "poll" + poll_frequency = "50ms" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} + +-- removed.river -- +declare "a" { + argument "input" {} + + testcomponents.passthrough "pt" { + input = argument.input.value + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +} + +-- added.river -- +declare "a" { + argument "input" {} + + export "output" { + value = -argument.input.value + } +} \ No newline at end of file diff --git a/internal/flow/testdata/import_git/import_git_2.txtar b/internal/flow/testdata/import_git/import_git_2.txtar new file mode 100644 index 0000000000..d05f11af39 --- /dev/null +++ b/internal/flow/testdata/import_git/import_git_2.txtar @@ -0,0 +1,20 @@ +Import passthrough module from a directory stored in a git repository. 
+ +-- main.river -- +testcomponents.count "inc" { + frequency = "10ms" + max = 10 +} + +import.git "testImport" { + repository = "https://github.com/wildum/module.git" + path = "passthrough" +} + +testImport.a "cc" { + input = testcomponents.count.inc.count +} + +testcomponents.summation "sum" { + input = testImport.a.cc.output +} diff --git a/internal/static/metrics/instance/instance.go b/internal/static/metrics/instance/instance.go index 1ff90d741c..a0de217627 100644 --- a/internal/static/metrics/instance/instance.go +++ b/internal/static/metrics/instance/instance.go @@ -409,7 +409,7 @@ func (i *Instance) initialize(ctx context.Context, reg prometheus.Registerer, cf return fmt.Errorf("error creating WAL: %w", err) } - i.writeHandler = remote.NewWriteHandler(i.logger, i.reg, i.wal) + i.writeHandler = remote.NewWriteHandler(i.logger, reg, i.wal) i.discovery, err = i.newDiscoveryManager(ctx, cfg) if err != nil { diff --git a/internal/static/metrics/instance/instance_test.go b/internal/static/metrics/instance/instance_test.go index 48d734f673..0f97aecac2 100644 --- a/internal/static/metrics/instance/instance_test.go +++ b/internal/static/metrics/instance/instance_test.go @@ -258,7 +258,8 @@ func TestInstance_Recreate(t *testing.T) { cfg.RemoteFlushDeadline = time.Hour logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - inst, err := New(prometheus.NewRegistry(), cfg, walDir, logger) + currentReg := prometheus.NewRegistry() + inst, err := New(currentReg, cfg, walDir, logger) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -278,7 +279,7 @@ func TestInstance_Recreate(t *testing.T) { // Recreate the instance, no panic should happen. require.NotPanics(t, func() { - inst, err := New(prometheus.NewRegistry(), cfg, walDir, logger) + inst, err := New(currentReg, cfg, walDir, logger) require.NoError(t, err) runInstance(t, inst) diff --git a/internal/vcs/git.go b/internal/vcs/git.go index dece43c10b..903826c45d 100644 --- a/internal/vcs/git.go +++ b/internal/vcs/git.go @@ -4,6 +4,7 @@ import ( "context" "errors" "io" + "io/fs" "os" "path/filepath" @@ -151,6 +152,24 @@ func (repo *GitRepo) ReadFile(path string) ([]byte, error) { return io.ReadAll(f) } +// Stat returns info from the repository specified by path. +func (repo *GitRepo) Stat(path string) (fs.FileInfo, error) { + f, err := repo.workTree.Filesystem.Stat(path) + if err != nil { + return nil, err + } + return f, nil +} + +// ReadDir returns info about the content of the directory in the repository. +func (repo *GitRepo) ReadDir(path string) ([]fs.FileInfo, error) { + f, err := repo.workTree.Filesystem.ReadDir(path) + if err != nil { + return nil, err + } + return f, nil +} + // CurrentRevision returns the current revision of the repository (by SHA). func (repo *GitRepo) CurrentRevision() (string, error) { ref, err := repo.repo.Head() diff --git a/operations/agent-mixin/dashboards/controller.libsonnet b/operations/agent-mixin/dashboards/controller.libsonnet index 3876d612a7..ac6125d578 100644 --- a/operations/agent-mixin/dashboards/controller.libsonnet +++ b/operations/agent-mixin/dashboards/controller.libsonnet @@ -200,6 +200,8 @@ local filename = 'agent-flow-controller.json'; ), // Component evaluation time + // + // This panel supports both native and classic histograms, though it only shows one at a time. 
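+ // Each query uses `or` to fall back to the classic _bucket series when native histograms are not collected.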
( panel.new(title='Component evaluation time', type='timeseries') + panel.withUnit('s') + @@ -216,17 +218,32 @@ local filename = 'agent-flow-controller.json'; panel.withPosition({ x: 8, y: 12, w: 8, h: 10 }) + panel.withQueries([ panel.newQuery( - expr='histogram_quantile(0.99, sum by (le) (rate(agent_component_evaluation_seconds_bucket{cluster="$cluster",namespace="$namespace"}[$__rate_interval])))', + expr=||| + histogram_quantile(0.99, sum(rate(agent_component_evaluation_seconds{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) + or + histogram_quantile(0.99, sum by (le) (rate(agent_component_evaluation_seconds_bucket{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) + |||, legendFormat='99th percentile', ), panel.newQuery( - expr='histogram_quantile(0.50, sum by (le) (rate(agent_component_evaluation_seconds_bucket{cluster="$cluster",namespace="$namespace"}[$__rate_interval])))', + expr=||| + histogram_quantile(0.50, sum(rate(agent_component_evaluation_seconds{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) + or + histogram_quantile(0.50, sum by (le) (rate(agent_component_evaluation_seconds_bucket{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) + |||, legendFormat='50th percentile', ), panel.newQuery( expr=||| - sum(rate(agent_component_evaluation_seconds_sum{cluster="$cluster",namespace="$namespace"}[$__rate_interval])) / - sum(rate(agent_component_evaluation_seconds_count{cluster="$cluster",namespace="$namespace"}[$__rate_interval])) + ( + histogram_sum(sum(rate(agent_component_evaluation_seconds{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) / + histogram_count(sum(rate(agent_component_evaluation_seconds{cluster="$cluster",namespace="$namespace"}[$__rate_interval]))) + ) + or + ( + sum(rate(agent_component_evaluation_seconds_sum{cluster="$cluster",namespace="$namespace"}[$__rate_interval])) / + sum(rate(agent_component_evaluation_seconds_count{cluster="$cluster",namespace="$namespace"}[$__rate_interval])) + ) |||, legendFormat='Average', ), @@ -256,8 +273,10 @@ local filename = 'agent-flow-controller.json'; ), // Component evaluation histogram + // + // This panel supports both native and classic histograms, though it only shows one at a time. ( - panel.newHeatmap('Component evaluation histogram') + + panel.newNativeHistogramHeatmap('Component evaluation histogram') + panel.withDescription(||| Detailed histogram view of how long component evaluations take. @@ -267,7 +286,11 @@ local filename = 'agent-flow-controller.json'; panel.withPosition({ x: 0, y: 22, w: 8, h: 10 }) + panel.withQueries([ panel.newQuery( - expr='sum by (le) (increase(agent_component_evaluation_seconds_bucket{cluster="$cluster", namespace="$namespace"}[$__rate_interval]))', + expr=||| + sum(increase(agent_component_evaluation_seconds{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) + or ignoring (le) + sum by (le) (increase(agent_component_evaluation_seconds_bucket{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) + |||, format='heatmap', legendFormat='{{le}}', ), @@ -275,8 +298,10 @@ local filename = 'agent-flow-controller.json'; ), // Component dependency wait time histogram + // + // This panel supports both native and classic histograms, though it only shows one at a time. 
( - panel.newHeatmap('Component dependency wait histogram') + + panel.newNativeHistogramHeatmap('Component dependency wait histogram') + panel.withDescription(||| Detailed histogram of how long components wait to be evaluated after their dependency is updated. @@ -286,7 +311,11 @@ local filename = 'agent-flow-controller.json'; panel.withPosition({ x: 8, y: 22, w: 8, h: 10 }) + panel.withQueries([ panel.newQuery( - expr='sum by (le) (increase(agent_component_dependencies_wait_seconds_bucket{cluster="$cluster", namespace="$namespace"}[$__rate_interval]))', + expr=||| + sum(increase(agent_component_dependencies_wait_seconds{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) + or ignoring (le) + sum by (le) (increase(agent_component_dependencies_wait_seconds_bucket{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) + |||, format='heatmap', legendFormat='{{le}}', ), diff --git a/operations/agent-mixin/dashboards/utils/panel.jsonnet b/operations/agent-mixin/dashboards/utils/panel.jsonnet index 93f7260f09..a59e6a4b54 100644 --- a/operations/agent-mixin/dashboards/utils/panel.jsonnet +++ b/operations/agent-mixin/dashboards/utils/panel.jsonnet @@ -59,6 +59,18 @@ pluginVersion: '9.0.6', }, + newNativeHistogramHeatmap(title=''):: $.newHeatmap(title) { + options+: { + cellGap: 0, + color: { + scheme: 'Spectral', + }, + filterValues: { + le: 0.1, + }, + }, + }, + withMultiTooltip():: { options+: { tooltip+: { mode: 'multi' }, @@ -128,8 +140,8 @@ } ), - newRow(title='', x=0, y=0, w=24, h=1, collapsed=false):: - $.new(title, 'row') - + $.withPosition({x: x, y: y, w: w, h: h }) - + {collapsed: collapsed}, + newRow(title='', x=0, y=0, w=24, h=1, collapsed=false):: + $.new(title, 'row') + + $.withPosition({ x: x, y: y, w: w, h: h }) + + { collapsed: collapsed }, } diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index b747fb625d..a6a00a5f88 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,6 +10,24 @@ internal API changes are not present. Unreleased ---------- +### Features + +- Allow setting nodePort for service. (@ryayon) + +0.36.0 (2024-02-27) +------------------- + +### Enhancements + +- Update Grafana Agent version to v0.40.2. (@rfratto) + +0.35.0 (2024-02-27) +------------------- + +### Enhancements + +- Update Grafana Agent version to v0.40.1. (@rfratto) + 0.34.0 (2024-02-27) ------------------- @@ -17,7 +35,6 @@ Unreleased - Update Grafana Agent version to v0.40.0. 
(@jcreixell) - 0.33.0 (2024-02-20) ------------------- diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index 28931463ad..9bb6a1de2c 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: grafana-agent description: 'Grafana Agent' type: application -version: 0.34.0 -appVersion: 'v0.40.0' +version: 0.36.0 +appVersion: 'v0.40.2' dependencies: - name: crds diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index 398bbc5047..51ea7498b6 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,6 +1,6 @@ # Grafana Agent Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.34.0](https://img.shields.io/badge/Version-0.34.0-informational?style=flat-square) ![AppVersion: v0.40.0](https://img.shields.io/badge/AppVersion-v0.40.0-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.36.0](https://img.shields.io/badge/Version-0.36.0-informational?style=flat-square) ![AppVersion: v0.40.2](https://img.shields.io/badge/AppVersion-v0.40.2-informational?style=flat-square) Helm chart for deploying [Grafana Agent][] to Kubernetes. @@ -127,6 +127,7 @@ use the older mode (called "static mode"), set the `agent.mode` value to | service.clusterIP | string | `""` | Cluster IP, can be set to None, empty "" or an IP address | | service.enabled | bool | `true` | Creates a Service for the controller's pods. | | service.internalTrafficPolicy | string | `"Cluster"` | Value for internal traffic policy. 'Cluster' or 'Local' | +| service.nodePort | int | `31128` | NodePort port. Only takes effect when `service.type: NodePort` | | service.type | string | `"ClusterIP"` | Service type | | serviceAccount.additionalLabels | object | `{}` | Additional labels to add to the created service account. | | serviceAccount.annotations | object | `{}` | Annotations to add to the created service account. | diff --git a/operations/helm/charts/grafana-agent/templates/service.yaml b/operations/helm/charts/grafana-agent/templates/service.yaml index a3180d4715..34427f822a 100644 --- a/operations/helm/charts/grafana-agent/templates/service.yaml +++ b/operations/helm/charts/grafana-agent/templates/service.yaml @@ -19,6 +19,9 @@ spec: internalTrafficPolicy: {{.Values.service.internalTrafficPolicy}} ports: - name: http-metrics + {{- if eq .Values.service.type "NodePort" }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} port: {{ .Values.agent.listenPort }} targetPort: {{ .Values.agent.listenPort }} protocol: "TCP" diff --git a/operations/helm/charts/grafana-agent/values.yaml b/operations/helm/charts/grafana-agent/values.yaml index d481ae0ab2..588274fa05 100644 --- a/operations/helm/charts/grafana-agent/values.yaml +++ b/operations/helm/charts/grafana-agent/values.yaml @@ -254,6 +254,8 @@ service: enabled: true # -- Service type type: ClusterIP + # -- NodePort port. Only takes effect when `service.type: NodePort` + nodePort: 31128 # -- Cluster IP, can be set to None, empty "" or an IP address clusterIP: '' # -- Value for internal traffic policy. 
diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml index 4e845f3b96..356c736349 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index 2a8bd8003f..a8601aec54 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index 691e99eec6..de44f0e848 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index 98941f0501..ef4397494e 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index 4e845f3b96..356c736349 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml
b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index 36f626d99a..86f6b61d85 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index 295169981e..df8f327002 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 593704ecb2..1a00f1123e 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -29,7 +29,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index ddc8c7ea43..8fd3dbde8d 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index 4e845f3b96..356c736349 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index 4e845f3b96..356c736349 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml +++ 
b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml index 52affb4758..4eb807a30b 100644 --- a/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor-tls/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index 4e845f3b96..356c736349 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index 9ed3a27054..425efb8183 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index f6f1c39a4a..7c266c655f 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index 46b51f3d92..0844db8bf1 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git 
a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 80456987d5..47042b9de5 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index f587487dfd..b712b997ee 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 108bb3e253..4b1470e1a8 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -32,7 +32,7 @@ spec: - name: global-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 4b44f5ee23..2210cfb0ce 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.40.0 + image: quay.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 36549588f9..9523a2b09b 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -45,7 +45,7 @@ spec: name: geoip containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 73542eb06f..e0a94d77b2 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ 
b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -29,7 +29,7 @@ spec: - name: local-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 4b44f5ee23..2210cfb0ce 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.40.0 + image: quay.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 69fed945f7..544681c5aa 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml index 3d6a45d6ce..34e0be4da1 100644 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml index 6fdf122707..426f1f9072 100644 --- a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml index 4890f1c902..6cc6602fa2 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - -config.file=/etc/agent/config.yaml diff --git 
a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml index 6e9e4725ef..cb482e7f9f 100644 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.40.0 + image: docker.io/grafana/agent:v0.40.2 imagePullPolicy: IfNotPresent args: - run diff --git a/packaging/grafana-agent/windows/install_script.nsis b/packaging/grafana-agent/windows/install_script.nsis index 688b8ca9a7..f4b1aa19c3 100644 --- a/packaging/grafana-agent/windows/install_script.nsis +++ b/packaging/grafana-agent/windows/install_script.nsis @@ -1,6 +1,6 @@ # This script does the following: # -# 1. Installs grafana-agent--windows-amd64.exe, grafana-agent-service-amd64.exe, and logo.ico. +# 1. Installs grafana-agent-windows-amd64.exe, grafana-agent-service-amd64.exe, and logo.ico. # 2. Creates a Start Menu shortcut. # 3. Builds an uninstaller. # 4. Adds uninstall information to the registry for Add/Remove Programs. @@ -13,9 +13,9 @@ Unicode true !include .\macros.nsis !define APPNAME "Grafana Agent" -!define HELPURL "https://grafana.com/docs/agent" -!define UPDATEURL "https://github.com/grafana/agent/releases" -!define ABOUTURL "https://github.com/grafana/agent" +!define HELPURL "https://grafana.com/docs/alloy/latest" +!define UPDATEURL "https://github.com/grafana/alloy/releases" +!define ABOUTURL "https://github.com/grafana/alloy" # Because we modify the registry and install a service that runs as # LocalSystem, we require admin permissions. @@ -63,7 +63,13 @@ Section "install" Pop $0 # Configure the out path and copy files to it. - SetOutPath "$INSTDIR" + IfFileExists "$INSTDIR" Exists NotExists + NotExists: + SetOutPath "$INSTDIR" + Call SetFolderPermissions + Exists: + SetOutPath "$INSTDIR" + File "..\..\..\dist.temp\grafana-agent-windows-amd64.exe" File "..\..\..\dist.temp\grafana-agent-service-windows-amd64.exe" File "logo.ico" @@ -109,6 +115,14 @@ Function CreateConfig Return CreateNewConfig: File "config.river" + + # Set permissions on the config file + AccessControl::DisableFileInheritance "$INSTDIR\config.river" + AccessControl::SetFileOwner "$INSTDIR\config.river" "Administrators" + AccessControl::ClearOnFile "$INSTDIR\config.river" "Administrators" "FullAccess" + AccessControl::SetOnFile "$INSTDIR\config.river" "SYSTEM" "FullAccess" + AccessControl::GrantOnFile "$INSTDIR\config.river" "Everyone" "ListDirectory" + AccessControl::GrantOnFile "$INSTDIR\config.river" "Everyone" "ReadAttributes" Return FunctionEnd @@ -164,6 +178,18 @@ Function InitializeRegistry Return FunctionEnd +Function SetFolderPermissions + # Set permissions on the install directory + AccessControl::DisableFileInheritance $INSTDIR + AccessControl::SetFileOwner $INSTDIR "Administrators" + AccessControl::ClearOnFile $INSTDIR "Administrators" "FullAccess" + AccessControl::SetOnFile $INSTDIR "SYSTEM" "FullAccess" + AccessControl::GrantOnFile $INSTDIR "Everyone" "ListDirectory" + AccessControl::GrantOnFile $INSTDIR "Everyone" "GenericExecute" + AccessControl::GrantOnFile $INSTDIR "Everyone" "GenericRead" + AccessControl::GrantOnFile $INSTDIR "Everyone" "ReadAttributes" +FunctionEnd + # Automatically called when uninstalling. 
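Function un.onInit SetShellVarContext all

The AccessControl::* directives added above come from the NSIS AccessControl plugin. The idiom is: disable ACL inheritance, take ownership for Administrators, replace the ACL with Administrators full access, add SYSTEM full access, then grant Everyone only narrow read/list rights. A condensed sketch of that lock-down sequence for a hypothetical file $INSTDIR\example.river:

; Restrict a file to Administrators/SYSTEM; read-only for other users (sketch).
AccessControl::DisableFileInheritance "$INSTDIR\example.river"
AccessControl::SetFileOwner "$INSTDIR\example.river" "Administrators"
AccessControl::ClearOnFile "$INSTDIR\example.river" "Administrators" "FullAccess"
AccessControl::SetOnFile "$INSTDIR\example.river" "SYSTEM" "FullAccess"
AccessControl::GrantOnFile "$INSTDIR\example.river" "Everyone" "GenericRead"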
diff --git a/tools/ci/docker-containers b/tools/ci/docker-containers index afc77e207c..e81de10834 100755 --- a/tools/ci/docker-containers +++ b/tools/ci/docker-containers @@ -47,13 +47,11 @@ else BRANCH_TAG=$VERSION fi - # Build all of our images. export BUILD_PLATFORMS=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x export BUILD_PLATFORMS_BORINGCRYPTO=linux/amd64,linux/arm64 - case "$TARGET_CONTAINER" in agent) docker buildx build --push \ diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt index dbccec6e08..1e24a0583a 100644 --- a/tools/gen-versioned-files/agent-version.txt +++ b/tools/gen-versioned-files/agent-version.txt @@ -1 +1 @@ -v0.40.0 \ No newline at end of file +v0.40.2 diff --git a/tools/make/build-container.mk b/tools/make/build-container.mk index 22a502e4d9..409d284e1f 100644 --- a/tools/make/build-container.mk +++ b/tools/make/build-container.mk @@ -34,7 +34,7 @@ # variable names should be passed through to the container. USE_CONTAINER ?= 0 -BUILD_IMAGE_VERSION ?= 0.32.0 +BUILD_IMAGE_VERSION ?= 0.33.0 BUILD_IMAGE ?= grafana/agent-build-image:$(BUILD_IMAGE_VERSION) DOCKER_OPTS ?= -it diff --git a/tools/make/packaging.mk b/tools/make/packaging.mk index f7ec9d86a1..41133a3f18 100644 --- a/tools/make/packaging.mk +++ b/tools/make/packaging.mk @@ -211,7 +211,7 @@ dist-agent-installer: dist.temp/grafana-agent-windows-amd64.exe dist.temp/grafan ifeq ($(USE_CONTAINER),1) $(RERUN_IN_CONTAINER) else - # quotes around mkdir are manadory. ref: https://github.com/grafana/agent/pull/5664#discussion_r1378796371 + # quotes around mkdir are mandatory. ref: https://github.com/grafana/agent/pull/5664#discussion_r1378796371 "mkdir" -p dist makensis -V4 -DVERSION=$(VERSION) -DOUT="../../../dist/grafana-agent-installer.exe" ./packaging/grafana-agent/windows/install_script.nsis endif
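On the quoted "mkdir" that the corrected comment refers to: the quoting is presumably there so that, when make's recipe runs under a Windows shell, the command name no longer matches the shell builtin mkdir (which does not understand -p) and instead resolves to an external mkdir binary on PATH, such as the one shipped with Git/MSYS coreutils. A sketch of the recipe shape under that assumption:

# Makefile sketch -- quoting bypasses the shell builtin so -p works on Windows.
# (The recipe line must begin with a tab.)
dist:
	"mkdir" -p dist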